1 /*
   2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
  27 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
  28 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
  29 #include "gc_implementation/parallelScavenge/psPermGen.hpp"
  30 #include "gc_implementation/shared/gcUtil.hpp"
  31 #include "gc_implementation/shared/markSweep.inline.hpp"
  32 #include "oops/markOop.inline.hpp"
  33 
// Construct the permanent generation on top of PSOldGen. The perm gen
// reuses PSOldGen's storage management but carries extra adaptive-sizing
// state: _last_used (live bytes at the previous GC) and _avg_size (a
// padded average of bytes allocated between collections), both consumed
// by compute_new_size().
PSPermGen::PSPermGen(ReservedSpace rs, size_t alignment,
                     size_t initial_size, size_t min_size, size_t max_size,
                     const char* gen_name, int level) :
  PSOldGen(rs, alignment, initial_size, min_size, max_size, gen_name, level),
  _last_used(0)
{
  assert(object_mark_sweep() != NULL, "Sanity");

  // The perm gen uses its own dead-space tolerance (PermMarkSweepDeadRatio)
  // rather than the old gen's default.
  object_mark_sweep()->set_allowed_dead_ratio(PermMarkSweepDeadRatio);
  // Weighted average, padded by PermGenPadding, of per-collection perm
  // allocation; sampled in compute_new_size().
  _avg_size = new AdaptivePaddedAverage(AdaptivePermSizeWeight,
                                        PermGenPadding);
}
  46 
  47 HeapWord* PSPermGen::allocate_permanent(size_t size) {
  48   assert_locked_or_safepoint(Heap_lock);
  49   HeapWord* obj = allocate_noexpand(size, false);
  50 
  51   if (obj == NULL) {
  52     obj = expand_and_allocate(size, false);
  53   }
  54 
  55   return obj;
  56 }
  57 
// Recompute and apply the perm gen's committed size after a collection.
// The desired size is the current live size plus a padded average of the
// allocation observed between collections, aligned and clamped to
// [_min_gen_size, _max_gen_size]. The generation is then grown or shrunk
// (under ExpandHeap_lock) to approach that target.
//
// 'used_before_collection' is the perm gen's used bytes measured before
// this collection ran; it must be monotonically >= the value stashed at
// the previous call (_last_used).
void PSPermGen::compute_new_size(size_t used_before_collection) {
  // Update our padded average of objects allocated in perm
  // gen between collections.
  assert(used_before_collection >= _last_used,
                                "negative allocation amount since last GC?");

  const size_t alloc_since_last_gc = used_before_collection - _last_used;
  _avg_size->sample(alloc_since_last_gc);

  const size_t current_live = used_in_bytes();
  // Stash away the current amount live for the next call to this method.
  _last_used = current_live;

  // We have different alignment constraints than the rest of the heap.
  const size_t alignment = MAX2(MinPermHeapExpansion,
                                virtual_space()->alignment());

  // Compute the desired size:
  //  The free space is the newly computed padded average,
  //  so the desired size is what's live + the free space.
  size_t desired_size = current_live + (size_t)_avg_size->padded_average();
  desired_size = align_size_up(desired_size, alignment);

  // ...and no larger or smaller than our max and min allowed.
  desired_size = MAX2(MIN2(desired_size, _max_gen_size), _min_gen_size);
  assert(desired_size <= _max_gen_size, "just checking");

  const size_t size_before = _virtual_space->committed_size();

  if (desired_size == size_before) {
    // no change, we're done
    return;
  }

  {
    // We'll be growing or shrinking the heap:  in either case,
    // we need to hold a lock.
    MutexLocker x(ExpandHeap_lock);
    if (desired_size > size_before) {
      // Growing: round the delta up so we commit at least enough to
      // reach the desired size.
      const size_t change_bytes = desired_size - size_before;
      const size_t aligned_change_bytes =
        align_size_up(change_bytes, alignment);
      expand_by(aligned_change_bytes);
    } else {
      // Shrinking: round the delta down so we never uncommit past the
      // desired size.
      const size_t change_bytes =
        size_before - desired_size;
      const size_t aligned_change_bytes = align_size_down(change_bytes, alignment);
      shrink(aligned_change_bytes);
    }
  }

  // While this code isn't controlled by AdaptiveSizePolicy, it's
  // convenient to see all resizing decisions under the same flag.
  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print_cr("AdaptiveSizePolicy::perm generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, _virtual_space->committed_size());
  }
}
 123 
 124 
 125 
// Parallel compaction entry point: delegate to PSParallelCompact,
// identifying this generation's region as perm_space_id.
void PSPermGen::move_and_update(ParCompactionManager* cm) {
  PSParallelCompact::move_and_update(cm, PSParallelCompact::perm_space_id);
}
 129 
// Serial (mark-sweep) compaction preparation for the perm gen.
// The start array is cleared before the decorator's precompact pass —
// presumably because that pass repopulates it while computing new object
// locations; confirm against PSMarkSweepDecorator::precompact before
// reordering.
void PSPermGen::precompact() {
  // Reset start array first.
  _start_array.reset();
  object_mark_sweep()->precompact();
}