/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our four static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;
  static ChunkPool* _tiny_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  NOINLINE void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // NMT bookkeeping in os::malloc may need a VM lock, and no VM lock can be
    // taken inside a ThreadCritical section, so os::malloc is called outside
    // the ThreadCritical block.
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    }
    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free the ones beyond the first n
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          // Free all remaining chunks while in ThreadCritical lock
          // so NMT adjustment is stable.
          while(cur != NULL) {
            next = cur->next();
            os::free(cur);
            _num_chunks--;
            cur = next;
          }
        }
      }
    }
  }

  // Accessors to the preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};
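
// Chunk::operator new/delete below route the standard chunk sizes through these
// pools; chunks of any other size bypass the pools and go straight to
// os::malloc/os::free.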

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
ChunkPool* ChunkPool::_tiny_pool   = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };      // cleaning interval in ms

 public:
   ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
   void task() {
     ChunkPool::clean();
   }
};
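
// The cleaner is enrolled exactly once, by Chunk::start_chunk_pool_cleaner_task()
// below, and trims every pool back to a handful of cached chunks each period.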

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // We expect requested_size == sizeof(Chunk); if that is not already the
  // properly aligned size, align it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
   default:
     ThreadCritical tc;  // Free chunks under TC lock so that NMT adjustment is stable.
     os::free(c);
  }
}

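// _len records only the payload length; the (aligned) Chunk header sits in
// front of the payload returned by bottom().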
Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}

void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;                   // Free chunk (was malloc'd)
    k = tmp;
  }
}

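// Free every chunk that follows this one in the list; this chunk itself is kept.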
void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}

void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------

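// Construct an arena whose first chunk has the requested size, rounded up to
// pointer alignment.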
Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(init_size);
}

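// Default-sized arena: the first chunk is Chunk::init_size bytes.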
Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(Chunk::init_size);
}

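// Hand this arena's chunks over to 'copy' (whose old contents are destroyed
// first) and leave this arena reset to empty; the NMT-tracked size moves with
// the chunks.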
Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // Work around a rare race condition that could cause native memory tracking
  // to double-count the arena size.
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  MemTracker::record_arena_free(_flags);
}

void* Arena::operator new(size_t size) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t&  nothrow_constant) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
  return (void *) AllocateHeap(size, flags, CALLER_PC);
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
  return (void*)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}
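
// A minimal usage sketch (illustrative only, assuming the Amalloc interface
// declared with the Arena class): callers carve allocations out of an arena
// and let its destructor release everything at once, e.g.
//
//   Arena arena(mtInternal);
//   void* buf = arena.Amalloc(64);   // pointer-bump allocation from the arena
//   ...                              // use buf; no individual free is needed
//   // all chunks are reclaimed when 'arena' goes out of scope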

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset the size before chopping to avoid a rare race condition in which
  // the total arena memory can exceed the total chunk memory
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    long delta = (long)(size - size_in_bytes());
    _size_in_bytes = size;
    MemTracker::record_arena_size_change(delta, _flags);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  Chunk *k = _first;
  while( k != _chunk) {         // Walk the earlier, completely filled Chunks
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

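// Report a failed allocation of sz bytes attributed to 'whence' and terminate
// the VM with an out-of-memory error.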
void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, "%s", whence);
}

// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get the minimum required size: at least a standard Chunk::size, or more for oversized requests
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    _chunk = k;                 // restore the previous value of _chunk
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max =  _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}


// Reallocate storage in Arena.
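// Shrinks in place; extends in place when old_ptr is the most recent allocation
// and the aligned new size still fits in the current chunk; otherwise allocates
// new space, copies the old contents, and frees the old block.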
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&       // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {      // Still fits where it sits
    _hwm = c_old+corrected_new_size;      // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}


// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}

#endif // Non-product