1 /*
   2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "memory/genCollectedHeap.hpp"
  29 #include "memory/metaspaceShared.hpp"
  30 #include "memory/resourceArea.hpp"
  31 #include "memory/universe.hpp"
  32 #include "runtime/atomic.hpp"
  33 #include "runtime/os.hpp"
  34 #include "runtime/task.hpp"
  35 #include "runtime/threadCritical.hpp"
  36 #include "services/memTracker.hpp"
  37 #include "utilities/ostream.hpp"
  38 
  39 #ifdef TARGET_OS_FAMILY_linux
  40 # include "os_linux.inline.hpp"
  41 #endif
  42 #ifdef TARGET_OS_FAMILY_solaris
  43 # include "os_solaris.inline.hpp"
  44 #endif
  45 #ifdef TARGET_OS_FAMILY_windows
  46 # include "os_windows.inline.hpp"
  47 #endif
  48 #ifdef TARGET_OS_FAMILY_bsd
  49 # include "os_bsd.inline.hpp"
  50 #endif
  51 
  52 void* StackObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
  53 void  StackObj::operator delete(void* p)   { ShouldNotCallThis(); };
  54 void* _ValueObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
  55 void  _ValueObj::operator delete(void* p)   { ShouldNotCallThis(); };
  56 
  57 void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
  58                                 size_t word_size, bool read_only, TRAPS) {
  59   // Klass has its own operator new
  60   return Metaspace::allocate(loader_data, word_size, read_only,
  61                              Metaspace::NonClassType, CHECK_NULL);
  62 }
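
     // Callers use placement-new syntax with the extra arguments above; a minimal
     // usage sketch (SomeMetadata is a hypothetical MetaspaceObj subclass):
     //
     //   SomeMetadata* m = new (loader_data, word_size, /*read_only*/ false, THREAD)
     //                         SomeMetadata(args);
     //
     // The returned storage comes from the loader's metaspace, not from the C heap.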
  63 
  64 bool MetaspaceObj::is_shared() const {
  65   return MetaspaceShared::is_in_shared_space(this);
  66 }
  67 
  68 bool MetaspaceObj::is_metadata() const {
  69   // GC Verify checks use this in guarantees.
  70   // TODO: either replace them with is_metaspace_object() or remove them.
  71   // is_metaspace_object() is slower than this test.  This test doesn't
  72   // seem very useful for metaspace objects anymore though.
  73   return !Universe::heap()->is_in_reserved(this);
  74 }
  75 
  76 bool MetaspaceObj::is_metaspace_object() const {
  77   return Metaspace::contains((void*)this);
  78 }
  79 
  80 void MetaspaceObj::print_address_on(outputStream* st) const {
  81   st->print(" {"INTPTR_FORMAT"}", this);
  82 }
  83 
  84 
  85 void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
  86   address res;
  87   switch (type) {
  88    case C_HEAP:
  89     res = (address)AllocateHeap(size, flags, CALLER_PC);
  90     DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
  91     break;
  92    case RESOURCE_AREA:
  93     // new(size) sets allocation type RESOURCE_AREA.
  94     res = (address)operator new(size);
  95     break;
  96    default:
  97     ShouldNotReachHere();
  98   }
  99   return res;
 100 }
 101 
 102 void* ResourceObj::operator new(size_t size, const std::nothrow_t&  nothrow_constant,
 103     allocation_type type, MEMFLAGS flags) {
 104   // Should only be called with std::nothrow; use the other operator new() otherwise.
 105   address res;
 106   switch (type) {
 107    case C_HEAP:
 108     res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
 109     DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
 110     break;
 111    case RESOURCE_AREA:
 112     // new(size) sets allocation type RESOURCE_AREA.
 113     res = (address)operator new(size, std::nothrow);
 114     break;
 115    default:
 116     ShouldNotReachHere();
 117   }
 118   return res;
 119 }
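
     // Typical usage is placement-new with an explicit allocation type, e.g.
     // (a sketch; MyResourceObj is a hypothetical ResourceObj subclass):
     //
     //   MyResourceObj* o = new (ResourceObj::C_HEAP, mtInternal) MyResourceObj();
     //   ...
     //   delete o;  // only legal for C_HEAP allocations, see operator delete below
     //
     // Plain 'new MyResourceObj()' allocates from the current resource area and is
     // reclaimed by the enclosing ResourceMark; it must not be deleted explicitly.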
 120 
 121 
 122 void ResourceObj::operator delete(void* p) {
 123   assert(((ResourceObj *)p)->allocated_on_C_heap(),
 124          "delete only allowed for C_HEAP objects");
 125   DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
 126   FreeHeap(p);
 127 }
 128 
 129 #ifdef ASSERT
 130 void ResourceObj::set_allocation_type(address res, allocation_type type) {
 131     // Set allocation type in the resource object
 132     uintptr_t allocation = (uintptr_t)res;
 133     assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
 134     assert(type <= allocation_mask, "incorrect allocation type");
 135     ResourceObj* resobj = (ResourceObj *)res;
 136     resobj->_allocation_t[0] = ~(allocation + type);
 137     if (type != STACK_OR_EMBEDDED) {
 138       // Called from operator new() and CollectionSetChooser(),
 139       // set verification value.
 140       resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
 141     }
 142 }
 143 
 144 ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
 145     assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
 146     return (allocation_type)((~_allocation_t[0]) & allocation_mask);
 147 }
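
     // Encoding used above: for an object at address A with allocation type T,
     // set_allocation_type() stores ~(A + T) in _allocation_t[0].  Because A is
     // at least 4-byte aligned and T fits in allocation_mask, both can be
     // recovered independently:
     //
     //   (~_allocation_t[0]) & allocation_mask  == T   (get_allocation_type)
     //   ~(_allocation_t[0] | allocation_mask)  == A   (detects lost/copied objects)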
 148 
 149 bool ResourceObj::is_type_set() const {
 150     allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
 151     return get_allocation_type()  == type &&
 152            (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
 153 }
 154 
 155 ResourceObj::ResourceObj() { // default constructor
 156     if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
 157       // Operator new() is not called for allocations
 158       // on stack and for embedded objects.
 159       set_allocation_type((address)this, STACK_OR_EMBEDDED);
 160     } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
 161       // For some reason we got a value which resembles
 162       // an embedded or stack object (operator new() does not
 163       // set such a type). Keep it since it is a valid value
 164       // (even if it was garbage).
 165       // Ignore garbage in other fields.
 166     } else if (is_type_set()) {
 167       // Operator new() was called and type was set.
 168       assert(!allocated_on_stack(),
 169              err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
 170                      this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
 171     } else {
 172       // Operator new() was not called.
 173       // Assume that it is embedded or stack object.
 174       set_allocation_type((address)this, STACK_OR_EMBEDDED);
 175     }
 176     _allocation_t[1] = 0; // Zap verification value
 177 }
 178 
 179 ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
 180     // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
 181     // Note: garbage may resemble a valid value.
 182     assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
 183            err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
 184                    this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
 185     set_allocation_type((address)this, STACK_OR_EMBEDDED);
 186     _allocation_t[1] = 0; // Zap verification value
 187 }
 188 
 189 ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
 190     // Used in InlineTree::ok_to_inline() for WarmCallInfo.
 191     assert(allocated_on_stack(),
 192            err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
 193                    this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
 194     // Keep current _allocation_t value;
 195     return *this;
 196 }
 197 
 198 ResourceObj::~ResourceObj() {
 199     // allocated_on_C_heap() also checks that the encoded (in _allocation_t) address == this.
 200     if (!allocated_on_C_heap()) { // ResourceObj::operator delete() will zap _allocation_t for C_heap.
 201       _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
 202     }
 203 }
 204 #endif // ASSERT
 205 
 206 
 207 void trace_heap_malloc(size_t size, const char* name, void* p) {
 208   // A lock is not needed here - tty uses a lock internally
 209   tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name);
 210 }
 211 
 212 
 213 void trace_heap_free(void* p) {
 214   // A lock is not needed here - tty uses a lock internally
 215   tty->print_cr("Heap free   " INTPTR_FORMAT, p);
 216 }
 217 
 218 bool warn_new_operator = false; // see vm_main
 219 
 220 //--------------------------------------------------------------------------------------
 221 // ChunkPool implementation
 222 
 223 // MT-safe pool of chunks to reduce malloc/free thrashing
 224 // NB: not using Mutex because pools are used before Threads are initialized
 225 class ChunkPool: public CHeapObj<mtInternal> {
 226   Chunk*       _first;        // first cached Chunk; its first word points to next chunk
 227   size_t       _num_chunks;   // number of unused chunks in pool
 228   size_t       _num_used;     // number of chunks currently checked out
 229   const size_t _size;         // size of each chunk (must be uniform)
 230 
 231   // Our three static pools
 232   static ChunkPool* _large_pool;
 233   static ChunkPool* _medium_pool;
 234   static ChunkPool* _small_pool;
 235 
 236   // return first element or null
 237   void* get_first() {
 238     Chunk* c = _first;
 239     if (_first) {
 240       _first = _first->next();
 241       _num_chunks--;
 242     }
 243     return c;
 244   }
 245 
 246  public:
 247   // All chunks in a ChunkPool have the same size
 248   ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
 249 
 250   // Allocate a new chunk from the pool (might expand the pool)
 251   _NOINLINE_ void* allocate(size_t bytes) {
 252     assert(bytes == _size, "bad size");
 253     void* p = NULL;
 254     // No VM lock may be taken while holding the ThreadCritical lock, and NMT
 255     // may take one, so os::malloc must be done outside the ThreadCritical block
 256     { ThreadCritical tc;
 257       _num_used++;
 258       p = get_first();
 259     }
 260     if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
 261     if (p == NULL)
 262       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
 263 
 264     return p;
 265   }
 266 
 267   // Return a chunk to the pool
 268   void free(Chunk* chunk) {
 269     assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
 270     ThreadCritical tc;
 271     _num_used--;
 272 
 273     // Add chunk to list
 274     chunk->set_next(_first);
 275     _first = chunk;
 276     _num_chunks++;
 277   }
 278 
 279   // Prune the pool
 280   void free_all_but(size_t n) {
 281     Chunk* cur = NULL;
 282     Chunk* next;
 283     {
 284       // If we have more than n chunks, unlink the excess under the lock
 285       ThreadCritical tc;
 286       if (_num_chunks > n) {
 287         // free chunks at end of queue, for better locality
 288         cur = _first;
 289         for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();
 290
 291         if (cur != NULL) {
 292           next = cur->next();
 293           cur->set_next(NULL);
 294           cur = next;
 295
 296           _num_chunks = n;
 297         }
 298       }
 299     }
 300
 301     // Free all remaining chunks outside of ThreadCritical
 302     // to avoid deadlock with NMT
 303     while (cur != NULL) {
 304       next = cur->next();
 305       os::free(cur, mtChunk);
 306       cur = next;
 307     }
 308   }
 309 
 310   // Accessors to the preallocated pools
 311   static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
 312   static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
 313   static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
 314 
 315   static void initialize() {
 316     _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
 317     _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
 318     _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
 319   }
 320 
 321   static void clean() {
 322     enum { BlocksToKeep = 5 };
 323     _small_pool->free_all_but(BlocksToKeep);
 324     _medium_pool->free_all_but(BlocksToKeep);
 325     _large_pool->free_all_but(BlocksToKeep);
 326   }
 327 };
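
     // Usage sketch (assuming the pools have been created via chunkpool_init()):
     //
     //   size_t bytes = Chunk::init_size + Chunk::aligned_overhead_size();
     //   void*  p     = ChunkPool::small_pool()->allocate(bytes);  // check out a chunk
     //   ...
     //   ChunkPool::small_pool()->free((Chunk*)p);                 // return it to the pool
     //
     // In practice Chunk::operator new/delete below perform this dispatch
     // automatically based on the chunk length.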
 328 
 329 ChunkPool* ChunkPool::_large_pool  = NULL;
 330 ChunkPool* ChunkPool::_medium_pool = NULL;
 331 ChunkPool* ChunkPool::_small_pool  = NULL;
 332 
 333 void chunkpool_init() {
 334   ChunkPool::initialize();
 335 }
 336 
 337 void
 338 Chunk::clean_chunk_pool() {
 339   ChunkPool::clean();
 340 }
 341 
 342 
 343 //--------------------------------------------------------------------------------------
 344 // ChunkPoolCleaner implementation
 345 //
 346 
 347 class ChunkPoolCleaner : public PeriodicTask {
 348   enum { CleaningInterval = 5000 };      // cleaning interval in ms
 349 
 350  public:
 351    ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
 352    void task() {
 353      ChunkPool::clean();
 354    }
 355 };
 356 
 357 //--------------------------------------------------------------------------------------
 358 // Chunk implementation
 359 
 360 void* Chunk::operator new(size_t requested_size, size_t length) {
 361   // requested_size is equal to sizeof(Chunk), but in order for arena
 362   // allocations to come out aligned as expected, the chunk overhead must be
 363   // rounded up to the expected arena alignment.
 364   // If sizeof(Chunk) is not already a properly aligned size, we align it here.
 365   assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
 366   size_t bytes = ARENA_ALIGN(requested_size) + length;
 367   switch (length) {
 368    case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
 369    case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
 370    case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
 371    default: {
 372      void *p =  os::malloc(bytes, mtChunk, CALLER_PC);
 373      if (p == NULL)
 374        vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
 375      return p;
 376    }
 377   }
 378 }
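
     // Allocation sketch: Arena::grow() requests chunks with the placement argument
     // equal to the payload length, e.g.
     //
     //   Chunk* c = new (Chunk::medium_size) Chunk(Chunk::medium_size);
     //
     // so the three standard lengths are served from the pools above and any other
     // length falls through to os::malloc().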
 379 
 380 void Chunk::operator delete(void* p) {
 381   Chunk* c = (Chunk*)p;
 382   switch (c->length()) {
 383    case Chunk::size:        ChunkPool::large_pool()->free(c); break;
 384    case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
 385    case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
 386    default:                 os::free(c, mtChunk);
 387   }
 388 }
 389 
 390 Chunk::Chunk(size_t length) : _len(length) {
 391   _next = NULL;         // Chain on the linked list
 392 }
 393 
 394 
 395 void Chunk::chop() {
 396   Chunk *k = this;
 397   while( k ) {
 398     Chunk *tmp = k->next();
 399     // clear out this chunk (to detect allocation bugs)
 400     if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
 401     delete k;                   // Free chunk (was malloc'd)
 402     k = tmp;
 403   }
 404 }
 405 
 406 void Chunk::next_chop() {
 407   _next->chop();
 408   _next = NULL;
 409 }
 410 
 411 
 412 void Chunk::start_chunk_pool_cleaner_task() {
 413 #ifdef ASSERT
 414   static bool task_created = false;
 415   assert(!task_created, "should not start chunk pool cleaner twice");
 416   task_created = true;
 417 #endif
 418   ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
 419   cleaner->enroll();
 420 }
 421 
 422 //------------------------------Arena------------------------------------------
 423 NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
 424 
 425 Arena::Arena(size_t init_size) {
 426   size_t round_size = (sizeof (char *)) - 1;
 427   init_size = (init_size+round_size) & ~round_size;
 428   _first = _chunk = new (init_size) Chunk(init_size);
 429   _hwm = _chunk->bottom();      // Save the cached hwm, max
 430   _max = _chunk->top();
 431   set_size_in_bytes(init_size);
 432   NOT_PRODUCT(Atomic::inc(&_instance_count);)
 433 }
 434 
 435 Arena::Arena() {
 436   _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
 437   _hwm = _chunk->bottom();      // Save the cached hwm, max
 438   _max = _chunk->top();
 439   set_size_in_bytes(Chunk::init_size);
 440   NOT_PRODUCT(Atomic::inc(&_instance_count);)
 441 }
 442 
 443 Arena *Arena::move_contents(Arena *copy) {
 444   copy->destruct_contents();
 445   copy->_chunk = _chunk;
 446   copy->_hwm   = _hwm;
 447   copy->_max   = _max;
 448   copy->_first = _first;
 449 
 450   // Work around a rare race condition in which native memory tracking
 451   // could double-count the arena size
 452   size_t size = size_in_bytes();
 453   set_size_in_bytes(0);
 454   copy->set_size_in_bytes(size);
 455   // Destroy original arena
 456   reset();
 457   return copy;            // Return Arena with contents
 458 }
 459 
 460 Arena::~Arena() {
 461   destruct_contents();
 462   NOT_PRODUCT(Atomic::dec(&_instance_count);)
 463 }
 464 
 465 void* Arena::operator new(size_t size) {
 466   assert(false, "Use dynamic memory type binding");
 467   return NULL;
 468 }
 469 
 470 void* Arena::operator new (size_t size, const std::nothrow_t&  nothrow_constant) {
 471   assert(false, "Use dynamic memory type binding");
 472   return NULL;
 473 }
 474 
 475 // Dynamic memory type binding
 476 void* Arena::operator new(size_t size, MEMFLAGS flags) {
 477 #ifdef ASSERT
 478   void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
 479   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
 480   return p;
 481 #else
 482   return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
 483 #endif
 484 }
 485 
 486 void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
 487 #ifdef ASSERT
 488   void* p = os::malloc(size, flags|otArena, CALLER_PC);
 489   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
 490   return p;
 491 #else
 492   return os::malloc(size, flags|otArena, CALLER_PC);
 493 #endif
 494 }
 495 
 496 void Arena::operator delete(void* p) {
 497   FreeHeap(p);
 498 }
 499 
 500 // Destroy this arena's contents and reset to empty
 501 void Arena::destruct_contents() {
 502   if (UseMallocOnly && _first != NULL) {
 503     char* end = _first->next() ? _first->top() : _hwm;
 504     free_malloced_objects(_first, _first->bottom(), end, _hwm);
 505   }
 506   // Reset the size before chop to avoid a rare race condition
 507   // in which the total arena memory could exceed the total chunk memory
 508   set_size_in_bytes(0);
 509   _first->chop();
 510   reset();
 511 }
 512 
 513 // This is a high-traffic method, but many calls actually don't
 514 // change the size
 515 void Arena::set_size_in_bytes(size_t size) {
 516   if (_size_in_bytes != size) {
 517     _size_in_bytes = size;
 518     NMTTrackOp op(NMTTrackOp::ArenaSizeOp);
 519     op.execute_op((address)this, size);
 520   }
 521 }
 522 
 523 // Total of all Chunks in arena
 524 size_t Arena::used() const {
 525   size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
 526   register Chunk *k = _first;
 527   while( k != _chunk) {         // While there are earlier Chunks in the list
 528     sum += k->length();         // Total size of this Chunk
 529     k = k->next();              // Bump along to next Chunk
 530   }
 531   return sum;                   // Return total consumed space.
 532 }
 533 
 534 void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
 535   vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
 536 }
 537 
 538 // Grow a new Chunk
 539 void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
 540   // Get minimal required size.  Either real big, or even bigger for giant objs
 541   size_t len = MAX2(x, (size_t) Chunk::size);
 542 
 543   Chunk *k = _chunk;            // Get filled-up chunk address
 544   _chunk = new (len) Chunk(len);
 545 
 546   if (_chunk == NULL) {
 547     if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
 548       signal_out_of_memory(len + Chunk::aligned_overhead_size(), "Arena::grow");
 549     }
 550     return NULL;
 551   }
 552   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
 553   else _first = _chunk;
 554   _hwm  = _chunk->bottom();     // Save the cached hwm, max
 555   _max =  _chunk->top();
 556   set_size_in_bytes(size_in_bytes() + len);
 557   void* result = _hwm;
 558   _hwm += x;
 559   return result;
 560 }
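
     // The fast path that precedes grow() lives in allocation.hpp (Amalloc and
     // friends); a rough sketch of its bump-pointer allocation, with alignment and
     // overflow checks omitted:
     //
     //   if (_hwm + x > _max) return grow(x, alloc_failmode);  // need a new chunk
     //   char* old = _hwm;
     //   _hwm += x;
     //   return old;
     //
     // internal_malloc_4() below follows the same pattern for UseMallocOnly builds.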
 561 
 562 
 563 
 564 // Reallocate storage in Arena.
 565 void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
 566   assert(new_size >= 0, "bad size");
 567   if (new_size == 0) return NULL;
 568 #ifdef ASSERT
 569   if (UseMallocOnly) {
 570     // always allocate a new object  (otherwise we'll free this one twice)
 571     char* copy = (char*)Amalloc(new_size, alloc_failmode);
 572     if (copy == NULL) {
 573       return NULL;
 574     }
 575     size_t n = MIN2(old_size, new_size);
 576     if (n > 0) memcpy(copy, old_ptr, n);
 577     Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
 578     return copy;
 579   }
 580 #endif
 581   char *c_old = (char*)old_ptr; // Handy name
 582   // Stupid fast special case
 583   if( new_size <= old_size ) {  // Shrink in-place
 584     if( c_old+old_size == _hwm) // Attempt to free the excess bytes
 585       _hwm = c_old+new_size;    // Adjust hwm
 586     return c_old;
 587   }
 588 
 589   // make sure that new_size is legal
 590   size_t corrected_new_size = ARENA_ALIGN(new_size);
 591 
 592   // See if we can resize in-place
 593   if( (c_old+old_size == _hwm) &&       // Adjusting recent thing
 594       (c_old+corrected_new_size <= _max) ) {      // Still fits where it sits
 595     _hwm = c_old+corrected_new_size;      // Adjust hwm
 596     return c_old;               // Return old pointer
 597   }
 598 
 599   // Oops, got to relocate guts
 600   void *new_ptr = Amalloc(new_size, alloc_failmode);
 601   if (new_ptr == NULL) {
 602     return NULL;
 603   }
 604   memcpy( new_ptr, c_old, old_size );
 605   Afree(c_old,old_size);        // Mostly done to keep stats accurate
 606   return new_ptr;
 607 }
 608 
 609 
 610 // Determine if pointer belongs to this Arena or not.
 611 bool Arena::contains( const void *ptr ) const {
 612 #ifdef ASSERT
 613   if (UseMallocOnly) {
 614     // really slow, but not easy to make fast
 615     if (_chunk == NULL) return false;
 616     char** bottom = (char**)_chunk->bottom();
 617     for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
 618       if (*p == ptr) return true;
 619     }
 620     for (Chunk *c = _first; c != NULL; c = c->next()) {
 621       if (c == _chunk) continue;  // current chunk has been processed
 622       char** bottom = (char**)c->bottom();
 623       for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
 624         if (*p == ptr) return true;
 625       }
 626     }
 627     return false;
 628   }
 629 #endif
 630   if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
 631     return true;                // Check for in this chunk
 632   for (Chunk *c = _first; c; c = c->next()) {
 633     if (c == _chunk) continue;  // current chunk has been processed
 634     if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
 635       return true;              // Check for every chunk in Arena
 636     }
 637   }
 638   return false;                 // Not in any Chunk, so not in Arena
 639 }
 640 
 641 
 642 #ifdef ASSERT
 643 void* Arena::malloc(size_t size) {
 644   assert(UseMallocOnly, "shouldn't call");
 645   // use malloc, but save pointer in res. area for later freeing
 646   char** save = (char**)internal_malloc_4(sizeof(char*));
 647   return (*save = (char*)os::malloc(size, mtChunk));
 648 }
 649 
 650 // for debugging with UseMallocOnly
 651 void* Arena::internal_malloc_4(size_t x) {
 652   assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
 653   check_for_overflow(x, "Arena::internal_malloc_4");
 654   if (_hwm + x > _max) {
 655     return grow(x);
 656   } else {
 657     char *old = _hwm;
 658     _hwm += x;
 659     return old;
 660   }
 661 }
 662 #endif
 663 
 664 
 665 //--------------------------------------------------------------------------------------
 666 // Non-product code
 667 
 668 #ifndef PRODUCT
 669 // The global operator new should never be called since it will usually indicate
 670 // a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
 671 // that they're allocated on the C heap.
 672 // Commented out in product version to avoid conflicts with third-party C++ native code.
 673 // %% Note: this is causing a problem on the Solaris debug build; the global
 674 // new is being called from JDK source and causing data corruption, see
 675 // src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
 676 // Define CATCH_OPERATOR_NEW_USAGE if you want to use this.
 677 #ifdef CATCH_OPERATOR_NEW_USAGE
 678 void* operator new(size_t size){
 679   static bool warned = false;
 680   if (!warned && warn_new_operator)
 681     warning("should not call global (default) operator new");
 682   warned = true;
 683   return (void *) AllocateHeap(size, mtInternal, CALLER_PC);
 684 }
 685 #endif
 686 
 687 void AllocatedObj::print() const       { print_on(tty); }
 688 void AllocatedObj::print_value() const { print_value_on(tty); }
 689 
 690 void AllocatedObj::print_on(outputStream* st) const {
 691   st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
 692 }
 693 
 694 void AllocatedObj::print_value_on(outputStream* st) const {
 695   st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
 696 }
 697 
 698 julong Arena::_bytes_allocated = 0;
 699 
 700 void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }
 701 
 702 AllocStats::AllocStats() {
 703   start_mallocs      = os::num_mallocs;
 704   start_frees        = os::num_frees;
 705   start_malloc_bytes = os::alloc_bytes;
 706   start_mfree_bytes  = os::free_bytes;
 707   start_res_bytes    = Arena::_bytes_allocated;
 708 }
 709 
 710 julong  AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
 711 julong  AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
 712 julong  AllocStats::num_frees()   { return os::num_frees - start_frees; }
 713 julong  AllocStats::free_bytes()  { return os::free_bytes - start_mfree_bytes; }
 714 julong  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
 715 void    AllocStats::print() {
 716   tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
 717                 UINT64_FORMAT" frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
 718                 num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
 719 }
 720 
 721 
 722 // debugging code
 723 inline void Arena::free_all(char** start, char** end) {
 724   for (char** p = start; p < end; p++) if (*p) os::free(*p);
 725 }
 726 
 727 void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
 728   assert(UseMallocOnly, "should not call");
 729   // free all objects malloced since resource mark was created; resource area
 730   // contains their addresses
 731   if (chunk->next()) {
 732     // this chunk is full, and some others too
 733     for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
 734       char* top = c->top();
 735       if (c->next() == NULL) {
 736         top = hwm2;     // last chunk is only used up to hwm2
 737         assert(c->contains(hwm2), "bad hwm2");
 738       }
 739       free_all((char**)c->bottom(), (char**)top);
 740     }
 741     assert(chunk->contains(hwm), "bad hwm");
 742     assert(chunk->contains(max), "bad max");
 743     free_all((char**)hwm, (char**)max);
 744   } else {
 745     // this chunk was partially used
 746     assert(chunk->contains(hwm), "bad hwm");
 747     assert(chunk->contains(hwm2), "bad hwm2");
 748     free_all((char**)hwm, (char**)hwm2);
 749   }
 750 }
 751 
 752 
 753 ReallocMark::ReallocMark() {
 754 #ifdef ASSERT
 755   Thread *thread = ThreadLocalStorage::get_thread_slow();
 756   _nesting = thread->resource_area()->nesting();
 757 #endif
 758 }
 759 
 760 void ReallocMark::check() {
 761 #ifdef ASSERT
 762   if (_nesting != Thread::current()->resource_area()->nesting()) {
 763     fatal("allocation bug: array could grow within nested ResourceMark");
 764   }
 765 #endif
 766 }
 767 
 768 #endif // Non-product