1 /*
   2  * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 class CompactibleFreeListSpace;
  26 
// A class for maintaining a free list of FreeChunk's.  The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list.  The links between items
// are not part of FreeList.  The statistics are
// used to make decisions about coalescing FreeChunk's when they
// are swept during collection.
  33 //
  34 // See the corresponding .cpp file for a description of the specifics
  35 // for that implementation.
  36 
  37 class Mutex;
  38 class TreeList;
  39 
class FreeList VALUE_OBJ_CLASS_SPEC {
  friend class CompactibleFreeListSpace;
  friend class VMStructs;          // VMStructs reads fields by name; do not rename them.
  friend class PrintTreeCensusClosure;

 protected:
  // Links used when this FreeList participates in a tree-structured
  // dictionary of free lists.  NOTE(review): semantics inferred from the
  // field names and the TreeList forward declaration — confirm in TreeList.
  TreeList* _parent;
  TreeList* _left;
  TreeList* _right;

 private:
  FreeChunk*    _head;          // Head of list of free chunks
  FreeChunk*    _tail;          // Tail of list of free chunks
  size_t        _size;          // Size in Heap words of each chunk
  ssize_t       _count;         // Number of entries in list
  size_t        _hint;          // next larger size list with a positive surplus

  AllocationStats _allocation_stats; // allocation-related statistics

#ifdef ASSERT
  Mutex*        _protecting_lock;    // Debug-only: lock that mutators of this
                                     // list are expected to hold; checked by
                                     // assert_proper_lock_protection() below.
#endif

  // Asserts false if the protecting lock (if any) is not held.
  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  // Inline wrapper: performs the lock check only in debug builds, and only
  // when a protecting lock has actually been registered for this list.
  void assert_proper_lock_protection() const {
#ifdef ASSERT
    if (_protecting_lock != NULL)
      assert_proper_lock_protection_work();
#endif
  }

  // Initialize the allocation statistics.
 protected:
  // "split_birth" seeds the stats as if one split birth has already occurred;
  // see AllocationStats for the meaning of the individual counters.
  void init_statistics(bool split_birth = false);
  void set_count(ssize_t v) { _count = v;}
  void increment_count()    {
    _count++;
  }

  void decrement_count() {
    _count--;
    assert(_count >= 0, "Count should not be negative");
  }

 public:
  // Constructor
  // Construct a list without any entries.
  FreeList();
  // Construct a list with "fc" as the first (and lone) entry in the list.
  FreeList(FreeChunk* fc);
  // Construct a list which will have a FreeChunk at address "addr" and
  // of size "size" as the first (and lone) entry in the list.
  FreeList(HeapWord* addr, size_t size);

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  // Declare the current free list to be protected by the given lock.
#ifdef ASSERT
  void set_protecting_lock(Mutex* protecting_lock) {
    _protecting_lock = protecting_lock;
  }
#endif

  // Accessors.
  FreeChunk* head() const {
    assert_proper_lock_protection();
    return _head;
  }
  void set_head(FreeChunk* v) {
    assert_proper_lock_protection();
    _head = v;
    // Every chunk on a list must match the list's chunk size.
    assert(!_head || _head->size() == _size, "bad chunk size");
  }
  // Set the head of the list and set the prev field of non-null
  // values to NULL.
  void link_head(FreeChunk* v) {
    assert_proper_lock_protection();
    set_head(v);
    // If this method is not used (just set the head instead),
    // this check can be avoided.
    if (v != NULL) {
      v->linkPrev(NULL);
    }
  }

  FreeChunk* tail() const {
    assert_proper_lock_protection();
    return _tail;
  }
  void set_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    _tail = v;
    // Every chunk on a list must match the list's chunk size.
    assert(!_tail || _tail->size() == _size, "bad chunk size");
  }
  // Set the tail of the list and set the next field of non-null
  // values to NULL.
  void link_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    set_tail(v);
    if (v != NULL) {
      v->clearNext();
    }
  }

  // No locking checks in read-accessors: lock-free reads (only) are benign.
  // Readers are expected to have the lock if they are doing work that
  // requires atomicity guarantees in sections of code.
  size_t size() const {
    return _size;
  }
  void set_size(size_t v) {
    assert_proper_lock_protection();
    _size = v;
  }
  ssize_t count() const {
    return _count;
  }
  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    // A hint of 0 means "no hint"; otherwise the hint must refer to a
    // strictly larger chunk size than this list's.
    assert(v == 0 || _size < v, "Bad hint"); _hint = v;
  }

  // Accessors for statistics
  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  // The remaining accessors delegate to AllocationStats; mutators assert
  // lock protection, plain readers do not (see the note on read-accessors
  // above).
  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void set_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_desired(v);
  }
  // Recompute the desired list length from the current count and the
  // supplied inter-/intra-sweep timing estimates.
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate,
                       float intra_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(_count,
                                      inter_sweep_current,
                                      inter_sweep_estimate,
                                      intra_sweep_estimate);
  }
  ssize_t coalDesired() const {
    return _allocation_stats.coalDesired();
  }
  void set_coalDesired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDesired(v);
  }

  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  ssize_t bfrSurp() const {
    return _allocation_stats.bfrSurp();
  }
  void set_bfrSurp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfrSurp(v);
  }
  ssize_t prevSweep() const {
    return _allocation_stats.prevSweep();
  }
  void set_prevSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prevSweep(v);
  }
  ssize_t beforeSweep() const {
    return _allocation_stats.beforeSweep();
  }
  void set_beforeSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_beforeSweep(v);
  }

  ssize_t coalBirths() const {
    return _allocation_stats.coalBirths();
  }
  void set_coalBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalBirths(v);
  }
  void increment_coalBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalBirths();
  }

  ssize_t coalDeaths() const {
    return _allocation_stats.coalDeaths();
  }
  void set_coalDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDeaths(v);
  }
  void increment_coalDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalDeaths();
  }

  ssize_t splitBirths() const {
    return _allocation_stats.splitBirths();
  }
  void set_splitBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitBirths(v);
  }
  void increment_splitBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitBirths();
  }

  ssize_t splitDeaths() const {
    return _allocation_stats.splitDeaths();
  }
  void set_splitDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitDeaths(v);
  }
  void increment_splitDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitDeaths();
  }

  NOT_PRODUCT(
    // For debugging.  The "_returnedBytes" in all the lists are summed
    // and compared with the total number of bytes swept during a
    // collection.
    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
    void increment_returnedBytes_by(size_t v) {
      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
    }
  )

  // Unlink head of list and return it.  Returns NULL if
  // the list is empty.
  FreeChunk* getChunkAtHead();

  // Remove the first "n" chunks from this list -- or all "_count" chunks,
  // whichever is smaller -- setting "fl", which is required to be empty,
  // to point to them.
  void getFirstNChunksFromList(size_t n, FreeList* fl);

  // Unlink this chunk from it's free list
  void removeChunk(FreeChunk* fc);

  // Add this chunk to this free list.
  void returnChunkAtHead(FreeChunk* fc);
  void returnChunkAtTail(FreeChunk* fc);

  // Similar to returnChunk* but also records some diagnostic
  // information.
  void returnChunkAtHead(FreeChunk* fc, bool record_return);
  void returnChunkAtTail(FreeChunk* fc, bool record_return);

  // Prepend "fl" (whose size is required to be the same as that of "this")
  // to the front of "this" list.
  void prepend(FreeList* fl);

  // Verify that the chunk is in the list.
  // Returns true if "fc" is found in the list, false otherwise.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;

  // Stats verification
  void verify_stats() const PRODUCT_RETURN;

  // Printing support
  static void print_labels_on(outputStream* st, const char* c);
  void print_on(outputStream* st, const char* c = NULL) const;
};