1 /*
   2  * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 # include "incls/_precompiled.incl"
  26 # include "incls/_psPermGen.cpp.incl"
  27 
  28 PSPermGen::PSPermGen(ReservedSpace rs, size_t alignment,
  29                      size_t initial_size, size_t min_size, size_t max_size,
  30                      const char* gen_name, int level) :
  31   PSOldGen(rs, alignment, initial_size, min_size, max_size, gen_name, level),
  32   _last_used(0)
  33 {
  34   assert(object_mark_sweep() != NULL, "Sanity");
  35 
  36   object_mark_sweep()->set_allowed_dead_ratio(PermMarkSweepDeadRatio);
  37   _avg_size = new AdaptivePaddedAverage(AdaptivePermSizeWeight,
  38                                         PermGenPadding);
  39 }
  40 
  41 HeapWord* PSPermGen::allocate_permanent(size_t size) {
  42   assert_locked_or_safepoint(Heap_lock);
  43   HeapWord* obj = allocate_noexpand(size, false);
  44 
  45   if (obj == NULL) {
  46     obj = expand_and_allocate(size, false);
  47   }
  48 
  49   return obj;
  50 }
  51 
// Resize the perm gen after a collection.  The desired committed size
// is the amount currently live plus a padded average of the bytes
// allocated between collections, clamped to [_min_gen_size,
// _max_gen_size]; the generation is then expanded or shrunk to match.
void PSPermGen::compute_new_size(size_t used_before_collection) {
  // Update our padded average of objects allocated in perm
  // gen between collections.
  assert(used_before_collection >= _last_used,
                                "negative allocation amount since last GC?");

  // Bytes allocated since the previous call (perm gen objects are not
  // expected to shrink between collections, per the assert above).
  const size_t alloc_since_last_gc = used_before_collection - _last_used;
  _avg_size->sample(alloc_since_last_gc);

  const size_t current_live = used_in_bytes();
  // Stash away the current amount live for the next call to this method.
  _last_used = current_live;

  // We have different alignment constraints than the rest of the heap.
  const size_t alignment = MAX2(MinPermHeapExpansion,
                                virtual_space()->alignment());

  // Compute the desired size:
  //  The free space is the newly computed padded average,
  //  so the desired size is what's live + the free space.
  size_t desired_size = current_live + (size_t)_avg_size->padded_average();
  desired_size = align_size_up(desired_size, alignment);

  // ...and no larger or smaller than our max and min allowed.
  desired_size = MAX2(MIN2(desired_size, _max_gen_size), _min_gen_size);
  assert(desired_size <= _max_gen_size, "just checking");

  const size_t size_before = _virtual_space->committed_size();

  if (desired_size == size_before) {
    // no change, we're done
    return;
  }

  {
    // We'll be growing or shrinking the heap:  in either case,
    // we need to hold a lock.
    MutexLocker x(ExpandHeap_lock);
    if (desired_size > size_before) {
      // Growing: round the delta up so we commit at least as much as
      // requested.
      const size_t change_bytes = desired_size - size_before;
      const size_t aligned_change_bytes =
        align_size_up(change_bytes, alignment);
      expand_by(aligned_change_bytes);
    } else {
      // Shrinking
      // Round the delta down so we never uncommit below desired_size.
      const size_t change_bytes =
        size_before - desired_size;
      const size_t aligned_change_bytes = align_size_down(change_bytes, alignment);
      shrink(aligned_change_bytes);
    }
  }

  // While this code isn't controlled by AdaptiveSizePolicy, it's
  // convenient to see all resizing decisions under the same flag.
  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print_cr("AdaptiveSizePolicy::perm generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, _virtual_space->committed_size());
  }
}
 117 
 118 
 119 
// Delegate compaction of this generation to the parallel compaction
// framework, identifying the perm space by its space id.
void PSPermGen::move_and_update(ParCompactionManager* cm) {
  PSParallelCompact::move_and_update(cm, PSParallelCompact::perm_space_id);
}
 123 
// Prepare the perm gen for compaction.
void PSPermGen::precompact() {
  // Reset start array first.  (Ordering note from the original author;
  // presumably the mark-sweep precompact pass repopulates it — confirm
  // against PSMarkSweepDecorator::precompact before reordering.)
  _start_array.reset();
  object_mark_sweep()->precompact();
}