/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s:  " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();

  trace_gen_sizes("ps heap raw",
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

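  // Choose a common page size for the heap: page_size_for_region() picks the
  // largest page size that still lets the region span at least the given
  // minimum number of pages (8 here).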
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);

  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  // Align old gen size down to preserve specified heap size.
  assert(og_align == yg_align, "sanity");
  og_max_size = align_size_down(og_max_size, og_align);
  og_max_size = MAX2(og_max_size, og_min_size);
  size_t og_cur_size =
    align_size_down(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  trace_gen_sizes("ps heap rnd",
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t heap_size = og_max_size + yg_max_size;

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, og_align);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base(),
                       heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

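  // The card table barrier set covers the entire reserved region; the second
  // constructor argument is the maximum number of covered (committed)
  // subregions.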
  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  if (barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);

  // Initial young gen size is 4 MB
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Make up the generations.
  //
  // Calculate the maximum size that a generation can grow.  This includes
  // growth into the other generation.  Note that _max_gen_size is still kept
  // as the maximum size of the generation as the boundaries currently stand,
  // and is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // Initialize the policy counters: 2 collectors, 3 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


size_t ParallelScavengeHeap::max_capacity() const {
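  // Exclude the to-space: it never holds application data after a scavenge,
  // so it is not counted as usable capacity.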
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  // The order of the generations is old (low addr), young (high addr)
  return p >= old_gen()->reserved().end();
}
#endif

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set it to
  // false here and reset it to true only if the gc time limit is being
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and a GC is
        // allowed. When the critical section clears, a GC is initiated by
        // the last thread exiting the critical section, so we retry the
        // allocation sequence from the beginning of the loop, rather than
        // causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
          "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses).  Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

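// Attempt an allocation in the old gen while the Heap_lock is held: used when
// the request is too large for eden, when a GC is locked out, or (a bounded
// number of times) while a death march is in progress.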
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction, which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation-flow
// policy, NOT collection policy: it decides where to attempt allocations
// and when to attempt collections, but it contains no collection-specific
// policy. Checking whether gc time has exceeded its limit is the
// responsibility of the heap-specific collection methods.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

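// An estimate of the largest allocation that could currently succeed without
// a collection: the free space in eden.  "Unsafe" because the value is read
// without synchronization and may be stale by the time it is used.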
size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

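// TLABs are carved directly out of eden through the young gen's (CAS-based)
// allocation path.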
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  unsigned int gc_count      = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}


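// Return the start of the block containing addr.  Only the old gen can
// answer this (via its start array); the young gen branch is reached only
// while debugging or during error reporting.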
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return NULL;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

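// Delegate to whichever full-gc implementation is in use; each tracks the
// time of its last collection.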
jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}


void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif