1 /*
   2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 //
  26 // Free block maintenance for Concurrent Mark Sweep Generation
  27 //
// The main data structures for free blocks are
  29 // . an indexed array of small free blocks, and
  30 // . a dictionary of large free blocks
  31 //
  32 
  33 // No virtuals in FreeChunk (don't want any vtables).
  34 
  35 // A FreeChunk is merely a chunk that can be in a doubly linked list
  36 // and has a size field. NOTE: FreeChunks are distinguished from allocated
  37 // objects in two ways (by the sweeper), depending on whether the VM is 32 or
  38 // 64 bits.
  39 // In 32 bits or 64 bits without CompressedOops, the second word (prev) has the
  40 // LSB set to indicate a free chunk; allocated objects' klass() pointers
  41 // don't have their LSB set. The corresponding bit in the CMSBitMap is
  42 // set when the chunk is allocated. There are also blocks that "look free"
  43 // but are not part of the free list and should not be coalesced into larger
  44 // free blocks. These free blocks have their two LSB's set.
  45 
class FreeChunk VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  // For 64 bit compressed oops, the markOop encodes both the size and the
  // indication that this is a FreeChunk and not an object.
  // In all other configurations, _size holds the chunk size in HeapWords
  // and the free/can't-coalesce tags live in the low two bits of _prev.
  volatile size_t   _size;
  FreeChunk* _prev;   // low bit 0x1 = "free", bit 0x2 = "don't coalesce" (non-compressed-oops case)
  FreeChunk* _next;

  // View _size as a mark word.  Only meaningful in the 64-bit
  // CompressedOops configuration, where the mark encodes size + freeness.
  markOop mark()     const volatile { return (markOop)_size; }
  void set_mark(markOop m)          { _size = (size_t)m; }

 public:
  NOT_PRODUCT(static const size_t header_size();)

  // Returns "true" if the address indicates that the block represents
  // a free chunk.
  static bool indicatesFreeChunk(const HeapWord* addr) {
    // Force volatile read from addr because value might change between
    // calls.  We really want the read of _mark and _prev from this pointer
    // to be volatile but making the fields volatile causes all sorts of
    // compilation errors.
    return ((volatile FreeChunk*)addr)->isFree();
  }

  // A chunk is free when either (64-bit CompressedOops) the mark word
  // carries the cms-free encoding, or otherwise the LSB of _prev is set
  // (allocated objects' klass() pointers never have their LSB set).
  bool isFree() const volatile {
    LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
    return (((intptr_t)_prev) & 0x1) == 0x1;
  }
  // True if this free chunk must not be coalesced into a larger free
  // block (bit 0x2 of _prev).  Only valid to ask of a free chunk.
  bool cantCoalesce() const {
    assert(isFree(), "can't get coalesce bit on not free");
    return (((intptr_t)_prev) & 0x2) == 0x2;
  }
  // Mark this (free) chunk as not coalescable by setting bit 0x2 of _prev.
  void dontCoalesce() {
    // the block should be free
    assert(isFree(), "Should look like a free block");
    _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
  }
  // Previous chunk in the doubly-linked free list, with the two tag
  // bits (free / don't-coalesce) stripped off.
  FreeChunk* prev() const {
    return (FreeChunk*)(((intptr_t)_prev) & ~(0x3));
  }

  debug_only(void* prev_addr() const { return (void*)&_prev; })
  debug_only(void* next_addr() const { return (void*)&_next; })
  debug_only(void* size_addr() const { return (void*)&_size; })

  // Chunk size in HeapWords; decoded from the mark word in the 64-bit
  // CompressedOops case, read directly from _size otherwise.
  size_t size() const volatile {
    LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
    return _size;
  }
  // Set the chunk size; in the 64-bit CompressedOops case this also
  // (re)establishes the "free" encoding in the mark word.
  void setSize(size_t sz) {
    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
    _size = sz;
  }

  FreeChunk* next()   const { return _next; }

  // Link ptr after this chunk, fixing up ptr's back-link if non-NULL.
  void linkAfter(FreeChunk* ptr) {
    linkNext(ptr);
    if (ptr != NULL) ptr->linkPrev(this);
  }
  // Same as linkAfter, but the caller guarantees ptr is non-NULL.
  void linkAfterNonNull(FreeChunk* ptr) {
    assert(ptr != NULL, "precondition violation");
    linkNext(ptr);
    ptr->linkPrev(this);
  }
  void linkNext(FreeChunk* ptr) { _next = ptr; }
  // Set the back-link; outside the 64-bit CompressedOops case the
  // "free" tag bit (0x1) is re-applied, since _prev doubles as the
  // free indicator there.
  void linkPrev(FreeChunk* ptr) {
    LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
    _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
  }
  void clearPrev()              { _prev = NULL; }
  void clearNext()              { _next = NULL; }
  // Erase the free-chunk encoding so the block no longer "looks free".
  void markNotFree() {
    // Set _prev (klass) to null before (if) clearing the mark word below
    _prev = NULL;
#ifdef _LP64
    if (UseCompressedOops) {
      // Ensure the _prev store is visible before the mark word loses
      // its free encoding.
      OrderAccess::storestore();
      set_mark(markOopDesc::prototype());
    }
#endif
    assert(!isFree(), "Error");
  }

  // Return the address past the end of this chunk
  HeapWord* end() const { return ((HeapWord*) this) + size(); }

  // debugging
  void verify()             const PRODUCT_RETURN;
  void verifyList()         const PRODUCT_RETURN;
  void mangleAllocated(size_t size) PRODUCT_RETURN;
  void mangleFreed(size_t size)     PRODUCT_RETURN;

  void print_on(outputStream* st);
};
 141 
// Minimum size (in HeapWords) of a block that can stand alone as a
// FreeChunk; defined elsewhere (set during CMS initialization).
extern size_t MinChunkSize;
 143