/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSPermGen*   ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s:  " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}
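
// With -XX:+TracePageSizes the helper above prints one line per call, e.g.
// (values purely illustrative):
//
//   ps heap raw:  20480,86016 43008,1376256 21504,458752 1921024
//
// i.e. "<tag>:  <perm min,max> <old min,max> <young min,max> <total max>",
// all in KB.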

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = _collector_policy->perm_gen_size();
  size_t pg_max_size = _collector_policy->max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);
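
  // Note: the third argument to page_size_for_region() is the minimum number
  // of pages the region must span at the chosen page size.  Requiring more
  // pages for the (smaller) perm gen biases it toward a smaller page size,
  // and the MIN2 above then guarantees it never exceeds og_page_sz.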

  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  // Align old gen size down to preserve specified heap size.
  assert(og_align == yg_align, "sanity");
  og_max_size = align_size_down(og_max_size, og_align);
  og_max_size = MAX2(og_max_size, og_min_size);
  size_t og_cur_size =
    align_size_down(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen.  Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.
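  //
  // The reservation is a single contiguous block laid out, from low to high
  // addresses, as [perm gen | old gen | young gen]; the perm gen portion is
  // split off below via first_part()/last_part().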

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

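  // If the initial reservation failed, it is retried at progressively less
  // favorable narrow-oop bases: first an address that permits unscaled
  // (32-bit) oops, then one that permits zero-based compression, and finally
  // anywhere at all (heap-based, which needs both a base and a shift to
  // decode).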
  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at the specified address - the requested memory
      // region is already taken, for example, by the 'java' launcher.
      // Try again to reserve the heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at the specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "HeapBasedNarrowOop mode should not request a preferred base");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // Check for allocation failure before publishing or using the barrier set.
  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  if (barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);

  // Initial young gen size is 4 MB
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations.
  //
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that the
  // parameter _max_gen_size is kept as the maximum size of the
  // generation as the boundaries currently stand, and is still
  // used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
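  // Note: initial_promo_size is capped by both eden (roughly the most a
  // single scavenge could promote) and the old gen's current capacity.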
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 3 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

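// max_capacity() deliberately excludes the perm gen and one survivor space:
// to-space (or, under adaptive sizing, the maximum survivor size) is never
// available for ordinary allocation, so it is not counted as usable capacity.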
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  // The order of the generations is perm (low addr), old, young (high addr)
  return p >= old_gen()->reserved().end();
}
#endif

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it here and reset it to true only if the gc time limit is
  // being exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period in which it
    // reads the total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) if GC is locked out via GCLocker, the young gen is full and
      //     the need for a GC has already been signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
          "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses).  Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %u times\n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation
// policy, NOT collection policy: checking whether gc time has gone
// over its limit is the responsibility of the heap-specific collection
// methods. This method decides where to attempt allocations and when
// to attempt collections, but contains no collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size);
    }
  }

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period in which it
    // reads the total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning NULL will do that), but
      // clear gc_overhead_limit_exceeded so that the next collection
      // can succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      if (limit_exceeded) {
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
            " return NULL because gc_overhead_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
          "result not in heap");
        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_overhead_limit_exceeded
        // flag to avoid the following situation:
        //      gc_overhead_limit_exceeded is set during a collection;
        //      the collection fails to return enough space and an OOM is thrown;
        //      a subsequent GC prematurely throws an out-of-memory because
        //        the gc_overhead_limit_exceeded counts did not start
        //        again from 0.
        if (op.result() == NULL) {
          size_policy()->reset_gc_overhead_limit_count();
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %u times\n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure.  Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

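// The largest allocation that is guaranteed not to require a collection is
// bounded by the free space currently remaining in eden.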
size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

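// TLABs are carved directly out of eden via the ordinary young gen
// allocator; there is no separate TLAB region.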
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  unsigned int gc_count      = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }
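
  // Passing the counts to the VM operation lets its prologue detect, and
  // skip, a collection that another thread completed after the counts were
  // read, avoiding back-to-back collections.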
  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}


void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return NULL;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}


void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif