/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parallelScavengeHeap.cpp.incl"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSPermGen*   ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

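// Print the min,max sizes (in KB) of the perm, old and young gens, followed
// by the total maximum, when TracePageSizes is enabled.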
static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s:  " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  GenerationSizer flag_parser;

  size_t yg_min_size = flag_parser.min_young_gen_size();
  size_t yg_max_size = flag_parser.max_young_gen_size();
  size_t og_min_size = flag_parser.min_old_gen_size();
  size_t og_max_size = flag_parser.max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = flag_parser.perm_gen_size();
  size_t pg_max_size = flag_parser.max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
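  // The trailing argument passed to os::page_size_for_region() below (8 and
  // 16) is the minimum number of pages the region must span.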
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);

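  // The generation alignments track the chosen page sizes; the young gen is
  // reserved together with the old gen, so it aligns to the old gen page size.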
  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  og_max_size = align_size_up(og_max_size, og_align);
  size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen.  Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at the specified address - the requested memory
      // region is already taken, for example, by the 'java' launcher.
      // Try again to reserve the heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at the specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

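  // The card table must cover the entire reserved heap; the second constructor
  // argument is the maximum number of covered regions (perm, old and young).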
  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 MB
  //
  // XXX - what about flag_parser.young_gen_size()?
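  // Start the young gen at 4 MB (aligned), but never above its maximum and
  // never below the flag-derived size computed above.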
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that _max_gen_size
  // still records the maximum size of a generation with the boundaries
  // where they currently stand.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
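  // Seed the adaptive size policy with the smaller of eden and the old gen
  // capacity as the initial promotion estimate.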
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

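// An estimate of the maximum amount of memory available for objects: the
// reserved region, less the perm gen and the space held back for a survivor
// space that never holds ordinary allocations.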
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

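  // Fast path: try a lock-free allocation in the young gen before taking
  // the Heap_lock.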
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) If GC is locked out via GCLocker, young gen is full and
      //     the need for a GC already signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out. If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of an individual object at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      *gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
          "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        //      gc_time_limit_exceeded is set during a collection
        //      the collection fails to return enough space and an OOM is thrown
        //      the next GC is skipped because the gc_time_limit_exceeded
        //        flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method sets policy for allocation
// flow, NOT collection policy. We do not check here whether the GC time
// limit has been exceeded; that is the responsibility of the heap-specific
// collection methods. This method decides where to attempt allocations and
// when to attempt collections, but sets no collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size, is_tlab);
    }
  }

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size, is_tlab);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
          "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        //      gc_time_limit_exceeded is set during a collection
        //      the collection fails to return enough space and an OOM is thrown
        //      the next GC is skipped because the gc_time_limit_exceeded
        //        flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure.  Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  unsigned int gc_count      = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}


void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    if (Debugging)  return NULL;  // called from find() in debug.cpp
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}


void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif