/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_MEMORY_ARENA_HPP
#define SHARE_MEMORY_ARENA_HPP

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"

#include <new>

// The byte alignment to be used by Arena::Amalloc.  See bugid 4169348.
// Note: this value must be a power of 2

#define ARENA_AMALLOC_ALIGNMENT (2*BytesPerWord)

#define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
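
// Illustrative example, assuming a 64-bit build where BytesPerWord == 8 and
// hence ARENA_AMALLOC_ALIGNMENT == 16:
//
//   ARENA_ALIGN(1)  == 16
//   ARENA_ALIGN(16) == 16
//   ARENA_ALIGN(17) == 32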

//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {

 private:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    tiny_size  =  256  - slack, // Size of first chunk (tiny)
    init_size  =  1*K  - slack, // Size of first chunk (normal aka small)
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };
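
  // Illustrative arithmetic: on _LP64, size = 32*K - 40 = 32728 bytes, so a
  // default chunk plus the Chunk header and the C heap's own per-block
  // bookkeeping is intended to fit within a 32K power-of-two bucket.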

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len;  }
  Chunk* next() const           { return _next;  }
  void set_next(Chunk* n)       { _next = n;  }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size();  }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }
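
  // Illustrative layout: the header and the data area live in one C-heap
  // block, and bottom()..top() brackets the data area:
  //
  //   +---------------------------+------------------------------+
  //   | Chunk header (aligned)    | data area of _len bytes      |
  //   +---------------------------+------------------------------+
  //   ^ this                      ^ bottom()                     ^ top()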

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};

//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  MEMFLAGS    _flags;           // Memory tracking flags

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

#ifndef PRODUCT
  static julong _bytes_allocated; // total #bytes allocated since start
  // Inlinable increment function (not atomic on MP, but avoids word-tearing on 32 bit)
  void inc_bytes_allocated(size_t x) {
    // Only do it if needed. Otherwise avoid contention.
    if (PrintMallocStatistics) {
#ifdef _LP64
      _bytes_allocated += x;
#else
      julong value = Atomic::load(&_bytes_allocated);
      Atomic::store(value + x, &_bytes_allocated);
#endif
    }
  }
#endif

  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  bool check_for_overflow(size_t request, const char* whence,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
        return false;
      }
      signal_out_of_memory(request, whence);
    }
    return true;
  }

 public:
  Arena(MEMFLAGS memflag);
  Arena(MEMFLAGS memflag, size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size) throw();
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
  void  operator delete(void* p);

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
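
  // Illustrative use of the Amalloc fast path (mtCompiler is just an
  // example tag; any MEMFLAGS value works):
  //
  //   Arena arena(mtCompiler);
  //   char* buf = (char*) arena.Amalloc(100);  // rounded up to 112 on LP64
  //   // ... use buf; it is reclaimed when 'arena' is destructed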
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment, which is 8 bytes on 32-bit SPARC.
  // On all other platforms Amalloc_D() behaves the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in arena.  Common case is: NOP (except for storage reclaimed)
  bool Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return true;
#endif
    if (((char*)ptr) + size == _hwm) {
      _hwm = (char*)ptr;
      return true;
    } else {
      // Unable to fast free, so we just drop it.
      return false;
    }
  }
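
  // Illustrative example, given an Arena 'arena' and assuming 64 is already
  // Amalloc-aligned on this platform:
  //
  //   void* a = arena.Amalloc(64);
  //   void* b = arena.Amalloc(64);
  //   arena.Afree(b, 64);  // true: b ends at _hwm, so _hwm rolls back to b
  //   arena.Afree(a, 64);  // true only because b was freed first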

  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         { return _size_in_bytes; }
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
  static void free_all(char** start, char** end)                                     PRODUCT_RETURN;

private:
  // Reset this Arena to empty; a later access will trigger grow() if necessary
  void   reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)
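
// Illustrative use of the macros above, given an Arena* 'arena' and a
// hypothetical element type Node:
//
//   Node* nodes = NEW_ARENA_ARRAY(arena, Node, 16);
//   nodes = REALLOC_ARENA_ARRAY(arena, Node, nodes, 16, 32);
//   FREE_ARENA_ARRAY(arena, Node, nodes, 32);  // reclaims only if last alloc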

#endif // SHARE_MEMORY_ARENA_HPP