1 /*
   2  * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/shared/cSpaceCounters.hpp"
  27 #include "gc_implementation/shared/vmGCOperations.hpp"
  28 #include "gc_interface/collectedHeap.inline.hpp"
  29 #include "memory/blockOffsetTable.inline.hpp"
  30 #include "memory/compactPermGen.hpp"
  31 #include "memory/gcLocker.hpp"
  32 #include "memory/gcLocker.inline.hpp"
  33 #include "memory/genCollectedHeap.hpp"
  34 #include "memory/generation.inline.hpp"
  35 #include "memory/permGen.hpp"
  36 #include "memory/universe.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "runtime/java.hpp"
  39 #include "runtime/vmThread.hpp"
  40 
// Allocate 'size' words in the given perm generation, triggering collections
// as needed. The protocol: try an in-place allocation under the Heap_lock;
// if that fails, optionally expand; if that fails, request a GC via a VM
// operation and retry. The GC cause escalates from _permanent_generation_full
// (ordinary full GC) to _last_ditch_collection (final attempt); if the
// last-ditch collection also fails to free space, return NULL (caller
// reports out-of-memory).
HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
  GCCause::Cause next_cause = GCCause::_permanent_generation_full;
  GCCause::Cause prev_cause = GCCause::_no_gc;
  unsigned int gc_count_before, full_gc_count_before;
  HeapWord* obj;

  for (;;) {
    {
      MutexLocker ml(Heap_lock);
      // Fast path: allocate from the current committed space.
      if ((obj = gen->allocate(size, false)) != NULL) {
        return obj;
      }
      // Expand only while under the soft expansion limit, or after a GC has
      // already been tried (prev_cause != _no_gc) -- at that point expansion
      // beyond the limit is preferable to failing the allocation.
      if (gen->capacity() < _capacity_expansion_limit ||
          prev_cause != GCCause::_no_gc) {
        obj = gen->expand_and_allocate(size, false);
      }
      // Give up entirely once the last-ditch collection has been tried and
      // even post-GC expansion could not satisfy the request.
      if (obj != NULL || prev_cause == GCCause::_last_ditch_collection) {
        return obj;
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          // A thread inside a JNI critical section cannot stall (it would
          // deadlock: the GC it waits for is blocked on this very thread
          // leaving the critical section).
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
      // Read the GC count while holding the Heap_lock
      // (the VM op uses these to detect whether another thread's GC
      // already ran between lock release and op execution).
      gc_count_before      = SharedHeap::heap()->total_collections();
      full_gc_count_before = SharedHeap::heap()->total_full_collections();
    }

    // Give up heap lock above, VMThread::execute below gets it back
    VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
                                           next_cause);
    VMThread::execute(&op);
    if (!op.prologue_succeeded() || op.gc_locked()) {
      // Either another GC already happened (prologue skipped) or the
      // GC_locker blocked the collection; loop to retry and/or stall.
      assert(op.result() == NULL, "must be NULL if gc_locked() is true");
      continue;  // retry and/or stall as necessary
    }
    obj = op.result();
    assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
           "result not in heap");
    if (obj != NULL) {
      return obj;
    }
    // The GC ran but could not allocate; escalate the cause so the next
    // iteration performs (at most one) last-ditch collection.
    prev_cause = next_cause;
    next_cause = GCCause::_last_ditch_collection;
  }
}
 104 
 105 CompactingPermGen::CompactingPermGen(ReservedSpace rs,
 106                                      ReservedSpace shared_rs,
 107                                      size_t initial_byte_size,
 108                                      GenRemSet* remset,
 109                                      PermanentGenerationSpec* perm_spec)
 110 {
 111   CompactingPermGenGen* g =
 112     new CompactingPermGenGen(rs, shared_rs, initial_byte_size, -1, remset,
 113                              NULL, perm_spec);
 114   if (g == NULL)
 115     vm_exit_during_initialization("Could not allocate a CompactingPermGen");
 116   _gen = g;
 117 
 118   g->initialize_performance_counters();
 119 
 120   _capacity_expansion_limit = g->capacity() + MaxPermHeapExpansion;
 121 }
 122 
 123 HeapWord* CompactingPermGen::mem_allocate(size_t size) {
 124   return mem_allocate_in_gen(size, _gen);
 125 }
 126 
 127 void CompactingPermGen::compute_new_size() {
 128   size_t desired_capacity = align_size_up(_gen->used(), MinPermHeapExpansion);
 129   if (desired_capacity < PermSize) {
 130     desired_capacity = PermSize;
 131   }
 132   if (_gen->capacity() > desired_capacity) {
 133     _gen->shrink(_gen->capacity() - desired_capacity);
 134   }
 135   _capacity_expansion_limit = _gen->capacity() + MaxPermHeapExpansion;
 136 }