1 /*
   2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
  26 #define SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
  27 
  28 #include "memory/freeList.hpp"
  29 #include "gc_implementation/shared/allocationStats.hpp"
  30 
  31 class CompactibleFreeListSpace;
  32 
// A class for maintaining a free list of Chunk's.  The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list.  The links between items
// are not part of the FreeList.  The statistics are
// used to make decisions about coalescing Chunk's when they
// are swept during collection.
  39 //
  40 // See the corresponding .cpp file for a description of the specifics
  41 // for that implementation.
  42 
  43 class Mutex;
  44 
// A free list of same-sized Chunk's that extends the basic FreeList with
// adaptive sizing statistics (AllocationStats).  CompactibleFreeListSpace
// (a friend) uses these statistics to decide how many chunks of this size
// to keep, coalesce, or split during sweeping.
template <class Chunk>
class AdaptiveFreeList : public FreeList<Chunk> {
  friend class CompactibleFreeListSpace;
  friend class VMStructs;
  // friend class PrintTreeCensusClosure<Chunk, FreeList_t>;

  size_t        _hint;          // next larger size list with a positive surplus

  AllocationStats _allocation_stats; // allocation-related statistics

 public:

  AdaptiveFreeList();
  AdaptiveFreeList(Chunk* fc);

  // Re-export selected base-class operations so they remain visible
  // alongside the overloads declared below (declaring an overload here
  // would otherwise hide the base-class names).
  using FreeList<Chunk>::assert_proper_lock_protection;
#ifdef ASSERT
  using FreeList<Chunk>::protecting_lock;
#endif
  using FreeList<Chunk>::count;
  using FreeList<Chunk>::size;
  using FreeList<Chunk>::verify_chunk_in_free_list;
  using FreeList<Chunk>::getFirstNChunksFromList;
  using FreeList<Chunk>::print_on;
  // Return a chunk to the head/tail of this list.  record_return controls
  // whether the return is recorded in the statistics.  deallocate_pages is
  // not part of the stock FreeList interface; presumably it requests that
  // the chunk's backing pages be released -- TODO confirm against the
  // .cpp implementation.
  void return_chunk_at_head(Chunk* fc, bool record_return, bool deallocate_pages);
  void return_chunk_at_head(Chunk* fc);
  void return_chunk_at_tail(Chunk* fc, bool record_return, bool deallocate_pages);
  void return_chunk_at_tail(Chunk* fc);
  using FreeList<Chunk>::return_chunk_at_tail;
  using FreeList<Chunk>::remove_chunk;
  using FreeList<Chunk>::prepend;
  using FreeList<Chunk>::print_labels_on;
  using FreeList<Chunk>::get_chunk_at_head;

  // Initialize.
  void initialize();

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  // Verify that the required lock is held (compiled out in PRODUCT builds).
  void assert_proper_lock_protection_work() const PRODUCT_RETURN;

  // Print this list (with an optional leading label column) to st.
  // Note: overloads, and together with the using-declaration above does
  // not hide, FreeList<Chunk>::print_on.
  void print_on(outputStream* st, const char* c = NULL) const;

  // Size of the next larger list known to have a positive surplus
  // (0 means no hint).
  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    // A hint must refer to a strictly larger chunk size than this list's.
    assert(v == 0 || size() < v, "Bad hint");
    _hint = v;
  }

  // Choose a (possibly larger) size to satisfy an allocation from; see the
  // corresponding .cpp file for the policy.
  size_t get_better_size();

  // Accessors for statistics
  void init_statistics(bool split_birth = false);

  // Direct access to the underlying statistics; callers must hold the
  // protecting lock.
  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  // All of the accessors below delegate to _allocation_stats; the setters
  // and increment/decrement operations additionally assert that the
  // protecting lock is held.

  // Estimated number of chunks of this size that will be requested
  // before the next sweep.
  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void set_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_desired(v);
  }
  // Recompute the demand estimate from the current chunk count and the
  // given sweep-interval estimates (delegates to AllocationStats).
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate,
                       float intra_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(count(),
                                      inter_sweep_current,
                                      inter_sweep_estimate,
                                      intra_sweep_estimate);
  }
  // Desired count for coalescing decisions.
  ssize_t coal_desired() const {
    return _allocation_stats.coal_desired();
  }
  void set_coal_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coal_desired(v);
  }

  // Surplus: chunks on hand beyond the desired count (may be negative).
  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  // "Before sweep" surplus -- presumably the surplus snapshotted at the
  // start of a sweep; see AllocationStats for the exact definition.
  ssize_t bfr_surp() const {
    return _allocation_stats.bfr_surp();
  }
  void set_bfr_surp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfr_surp(v);
  }
  // Chunk count recorded at the end of the previous sweep.
  ssize_t prev_sweep() const {
    return _allocation_stats.prev_sweep();
  }
  void set_prev_sweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prev_sweep(v);
  }
  // Chunk count recorded at the start of the current sweep.
  ssize_t before_sweep() const {
    return _allocation_stats.before_sweep();
  }
  void set_before_sweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_before_sweep(v);
  }

  // Births/deaths counters: per AllocationStats, "coal" counters track
  // chunks created/consumed by coalescing and "split" counters track
  // chunks created/consumed by splitting.
  ssize_t coal_births() const {
    return _allocation_stats.coal_births();
  }
  void set_coal_births(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coal_births(v);
  }
  void increment_coal_births() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coal_births();
  }

  ssize_t coal_deaths() const {
    return _allocation_stats.coal_deaths();
  }
  void set_coal_deaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coal_deaths(v);
  }
  void increment_coal_deaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coal_deaths();
  }

  ssize_t split_births() const {
    return _allocation_stats.split_births();
  }
  void set_split_births(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_split_births(v);
  }
  void increment_split_births() {
    assert_proper_lock_protection();
    _allocation_stats.increment_split_births();
  }

  ssize_t split_deaths() const {
    return _allocation_stats.split_deaths();
  }
  void set_split_deaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_split_deaths(v);
  }
  void increment_split_deaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_split_deaths();
  }

#ifndef PRODUCT
  // For debugging.  The "_returned_bytes" in all the lists are summed
  // and compared with the total number of bytes swept during a
  // collection.
  size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
  void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
  void increment_returned_bytes_by(size_t v) {
    _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
  }
  // Stats verification
  void verify_stats() const;
#endif  // NOT PRODUCT
};
 231 
 232 #endif // SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP