1 /* 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "memory/allocation.hpp" 27 #include "memory/allocation.inline.hpp" 28 #include "memory/genCollectedHeap.hpp" 29 #include "memory/metaspaceShared.hpp" 30 #include "memory/resourceArea.hpp" 31 #include "memory/universe.hpp" 32 #include "runtime/atomic.hpp" 33 #include "runtime/os.hpp" 34 #include "runtime/task.hpp" 35 #include "runtime/threadCritical.hpp" 36 #include "services/memTracker.hpp" 37 #include "utilities/ostream.hpp" 38 39 #ifdef TARGET_OS_FAMILY_linux 40 # include "os_linux.inline.hpp" 41 #endif 42 #ifdef TARGET_OS_FAMILY_solaris 43 # include "os_solaris.inline.hpp" 44 #endif 45 #ifdef TARGET_OS_FAMILY_windows 46 # include "os_windows.inline.hpp" 47 #endif 48 #ifdef TARGET_OS_FAMILY_aix 49 # include "os_aix.inline.hpp" 50 #endif 51 #ifdef TARGET_OS_FAMILY_bsd 52 # include "os_bsd.inline.hpp" 53 #endif 54 55 void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; } 56 void StackObj::operator delete(void* p) { ShouldNotCallThis(); } 57 void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; } 58 void StackObj::operator delete [](void* p) { ShouldNotCallThis(); } 59 60 void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; } 61 void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); } 62 void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; } 63 void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); } 64 65 void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data, 66 size_t word_size, bool read_only, 67 MetaspaceObj::Type type, TRAPS) throw() { 68 // Klass has it's own operator new 69 return Metaspace::allocate(loader_data, word_size, read_only, 70 type, CHECK_NULL); 71 } 72 73 bool MetaspaceObj::is_shared() const { 74 return MetaspaceShared::is_in_shared_space(this); 75 } 76 77 bool MetaspaceObj::is_metaspace_object() const { 78 return ClassLoaderDataGraph::contains((void*)this); 79 } 80 81 void MetaspaceObj::print_address_on(outputStream* st) const { 82 st->print(" {"INTPTR_FORMAT"}", this); 83 } 84 85 void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() { 86 address res; 87 switch (type) { 88 case C_HEAP: 89 res = (address)AllocateHeap(size, flags, CALLER_PC); 90 DEBUG_ONLY(set_allocation_type(res, C_HEAP);) 91 break; 92 case RESOURCE_AREA: 93 // new(size) sets allocation type RESOURCE_AREA. 94 res = (address)operator new(size); 95 break; 96 default: 97 ShouldNotReachHere(); 98 } 99 return res; 100 } 101 102 void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() { 103 return (address) operator new(size, type, flags); 104 } 105 106 void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant, 107 allocation_type type, MEMFLAGS flags) throw() { 108 //should only call this with std::nothrow, use other operator new() otherwise 109 address res; 110 switch (type) { 111 case C_HEAP: 112 res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL); 113 DEBUG_ONLY(if (res!= NULL) set_allocation_type(res, C_HEAP);) 114 break; 115 case RESOURCE_AREA: 116 // new(size) sets allocation type RESOURCE_AREA. 117 res = (address)operator new(size, std::nothrow); 118 break; 119 default: 120 ShouldNotReachHere(); 121 } 122 return res; 123 } 124 125 void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant, 126 allocation_type type, MEMFLAGS flags) throw() { 127 return (address)operator new(size, nothrow_constant, type, flags); 128 } 129 130 void ResourceObj::operator delete(void* p) { 131 assert(((ResourceObj *)p)->allocated_on_C_heap(), 132 "delete only allowed for C_HEAP objects"); 133 DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;) 134 FreeHeap(p); 135 } 136 137 void ResourceObj::operator delete [](void* p) { 138 operator delete(p); 139 } 140 141 #ifdef ASSERT 142 void ResourceObj::set_allocation_type(address res, allocation_type type) { 143 // Set allocation type in the resource object 144 uintptr_t allocation = (uintptr_t)res; 145 assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " PTR_FORMAT, res)); 146 assert(type <= allocation_mask, "incorrect allocation type"); 147 ResourceObj* resobj = (ResourceObj *)res; 148 resobj->_allocation_t[0] = ~(allocation + type); 149 if (type != STACK_OR_EMBEDDED) { 150 // Called from operator new() and CollectionSetChooser(), 151 // set verification value. 152 resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type; 153 } 154 } 155 156 ResourceObj::allocation_type ResourceObj::get_allocation_type() const { 157 assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object"); 158 return (allocation_type)((~_allocation_t[0]) & allocation_mask); 159 } 160 161 bool ResourceObj::is_type_set() const { 162 allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask); 163 return get_allocation_type() == type && 164 (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]); 165 } 166 167 ResourceObj::ResourceObj() { // default constructor 168 if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) { 169 // Operator new() is not called for allocations 170 // on stack and for embedded objects. 171 set_allocation_type((address)this, STACK_OR_EMBEDDED); 172 } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED 173 // For some reason we got a value which resembles 174 // an embedded or stack object (operator new() does not 175 // set such type). Keep it since it is valid value 176 // (even if it was garbage). 177 // Ignore garbage in other fields. 178 } else if (is_type_set()) { 179 // Operator new() was called and type was set. 180 assert(!allocated_on_stack(), 181 err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")", 182 this, get_allocation_type(), _allocation_t[0], _allocation_t[1])); 183 } else { 184 // Operator new() was not called. 185 // Assume that it is embedded or stack object. 186 set_allocation_type((address)this, STACK_OR_EMBEDDED); 187 } 188 _allocation_t[1] = 0; // Zap verification value 189 } 190 191 ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor 192 // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream. 193 // Note: garbage may resembles valid value. 194 assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(), 195 err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")", 196 this, get_allocation_type(), _allocation_t[0], _allocation_t[1])); 197 set_allocation_type((address)this, STACK_OR_EMBEDDED); 198 _allocation_t[1] = 0; // Zap verification value 199 } 200 201 ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment 202 // Used in InlineTree::ok_to_inline() for WarmCallInfo. 203 assert(allocated_on_stack(), 204 err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")", 205 this, get_allocation_type(), _allocation_t[0], _allocation_t[1])); 206 // Keep current _allocation_t value; 207 return *this; 208 } 209 210 ResourceObj::~ResourceObj() { 211 // allocated_on_C_heap() also checks that encoded (in _allocation) address == this. 212 if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap. 213 _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type 214 } 215 } 216 #endif // ASSERT 217 218 219 void trace_heap_malloc(size_t size, const char* name, void* p) { 220 // A lock is not needed here - tty uses a lock internally 221 tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name); 222 } 223 224 225 void trace_heap_free(void* p) { 226 // A lock is not needed here - tty uses a lock internally 227 tty->print_cr("Heap free " INTPTR_FORMAT, p); 228 } 229 230 //-------------------------------------------------------------------------------------- 231 // ChunkPool implementation 232 233 // MT-safe pool of chunks to reduce malloc/free thrashing 234 // NB: not using Mutex because pools are used before Threads are initialized 235 class ChunkPool: public CHeapObj<mtInternal> { 236 Chunk* _first; // first cached Chunk; its first word points to next chunk 237 size_t _num_chunks; // number of unused chunks in pool 238 size_t _num_used; // number of chunks currently checked out 239 const size_t _size; // size of each chunk (must be uniform) 240 241 // Our four static pools 242 static ChunkPool* _large_pool; 243 static ChunkPool* _medium_pool; 244 static ChunkPool* _small_pool; 245 static ChunkPool* _tiny_pool; 246 247 // return first element or null 248 void* get_first() { 249 Chunk* c = _first; 250 if (_first) { 251 _first = _first->next(); 252 _num_chunks--; 253 } 254 return c; 255 } 256 257 public: 258 // All chunks in a ChunkPool has the same size 259 ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } 260 261 // Allocate a new chunk from the pool (might expand the pool) 262 _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) { 263 assert(bytes == _size, "bad size"); 264 void* p = NULL; 265 // No VM lock can be taken inside ThreadCritical lock, so os::malloc 266 // should be done outside ThreadCritical lock due to NMT 267 { ThreadCritical tc; 268 _num_used++; 269 p = get_first(); 270 } 271 if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC); 272 if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { 273 vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate"); 274 } 275 return p; 276 } 277 278 // Return a chunk to the pool 279 void free(Chunk* chunk) { 280 assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size"); 281 ThreadCritical tc; 282 _num_used--; 283 284 // Add chunk to list 285 chunk->set_next(_first); 286 _first = chunk; 287 _num_chunks++; 288 } 289 290 // Prune the pool 291 void free_all_but(size_t n) { 292 Chunk* cur = NULL; 293 Chunk* next; 294 { 295 // if we have more than n chunks, free all of them 296 ThreadCritical tc; 297 if (_num_chunks > n) { 298 // free chunks at end of queue, for better locality 299 cur = _first; 300 for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next(); 301 302 if (cur != NULL) { 303 next = cur->next(); 304 cur->set_next(NULL); 305 cur = next; 306 307 _num_chunks = n; 308 } 309 } 310 } 311 312 // Free all remaining chunks, outside of ThreadCritical 313 // to avoid deadlock with NMT 314 while(cur != NULL) { 315 next = cur->next(); 316 os::free(cur, mtChunk); 317 cur = next; 318 } 319 } 320 321 // Accessors to preallocated pool's 322 static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; } 323 static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; } 324 static ChunkPool* small_pool() { assert(_small_pool != NULL, "must be initialized"); return _small_pool; } 325 static ChunkPool* tiny_pool() { assert(_tiny_pool != NULL, "must be initialized"); return _tiny_pool; } 326 327 static void initialize() { 328 _large_pool = new ChunkPool(Chunk::size + Chunk::aligned_overhead_size()); 329 _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size()); 330 _small_pool = new ChunkPool(Chunk::init_size + Chunk::aligned_overhead_size()); 331 _tiny_pool = new ChunkPool(Chunk::tiny_size + Chunk::aligned_overhead_size()); 332 } 333 334 static void clean() { 335 enum { BlocksToKeep = 5 }; 336 _tiny_pool->free_all_but(BlocksToKeep); 337 _small_pool->free_all_but(BlocksToKeep); 338 _medium_pool->free_all_but(BlocksToKeep); 339 _large_pool->free_all_but(BlocksToKeep); 340 } 341 }; 342 343 ChunkPool* ChunkPool::_large_pool = NULL; 344 ChunkPool* ChunkPool::_medium_pool = NULL; 345 ChunkPool* ChunkPool::_small_pool = NULL; 346 ChunkPool* ChunkPool::_tiny_pool = NULL; 347 348 void chunkpool_init() { 349 ChunkPool::initialize(); 350 } 351 352 void 353 Chunk::clean_chunk_pool() { 354 ChunkPool::clean(); 355 } 356 357 358 //-------------------------------------------------------------------------------------- 359 // ChunkPoolCleaner implementation 360 // 361 362 class ChunkPoolCleaner : public PeriodicTask { 363 enum { CleaningInterval = 5000 }; // cleaning interval in ms 364 365 public: 366 ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {} 367 void task() { 368 ChunkPool::clean(); 369 } 370 }; 371 372 //-------------------------------------------------------------------------------------- 373 // Chunk implementation 374 375 void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() { 376 // requested_size is equal to sizeof(Chunk) but in order for the arena 377 // allocations to come out aligned as expected the size must be aligned 378 // to expected arena alignment. 379 // expect requested_size but if sizeof(Chunk) doesn't match isn't proper size we must align it. 380 assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment"); 381 size_t bytes = ARENA_ALIGN(requested_size) + length; 382 switch (length) { 383 case Chunk::size: return ChunkPool::large_pool()->allocate(bytes, alloc_failmode); 384 case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode); 385 case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes, alloc_failmode); 386 case Chunk::tiny_size: return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode); 387 default: { 388 void* p = os::malloc(bytes, mtChunk, CALLER_PC); 389 if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { 390 vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new"); 391 } 392 return p; 393 } 394 } 395 } 396 397 void Chunk::operator delete(void* p) { 398 Chunk* c = (Chunk*)p; 399 switch (c->length()) { 400 case Chunk::size: ChunkPool::large_pool()->free(c); break; 401 case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break; 402 case Chunk::init_size: ChunkPool::small_pool()->free(c); break; 403 case Chunk::tiny_size: ChunkPool::tiny_pool()->free(c); break; 404 default: os::free(c, mtChunk); 405 } 406 } 407 408 Chunk::Chunk(size_t length) : _len(length) { 409 _next = NULL; // Chain on the linked list 410 } 411 412 413 void Chunk::chop() { 414 Chunk *k = this; 415 while( k ) { 416 Chunk *tmp = k->next(); 417 // clear out this chunk (to detect allocation bugs) 418 if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length()); 419 delete k; // Free chunk (was malloc'd) 420 k = tmp; 421 } 422 } 423 424 void Chunk::next_chop() { 425 _next->chop(); 426 _next = NULL; 427 } 428 429 430 void Chunk::start_chunk_pool_cleaner_task() { 431 #ifdef ASSERT 432 static bool task_created = false; 433 assert(!task_created, "should not start chuck pool cleaner twice"); 434 task_created = true; 435 #endif 436 ChunkPoolCleaner* cleaner = new ChunkPoolCleaner(); 437 cleaner->enroll(); 438 } 439 440 //------------------------------Arena------------------------------------------ 441 NOT_PRODUCT(volatile jint Arena::_instance_count = 0;) 442 443 Arena::Arena(size_t init_size) { 444 size_t round_size = (sizeof (char *)) - 1; 445 init_size = (init_size+round_size) & ~round_size; 446 _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size); 447 _hwm = _chunk->bottom(); // Save the cached hwm, max 448 _max = _chunk->top(); 449 set_size_in_bytes(init_size); 450 NOT_PRODUCT(Atomic::inc(&_instance_count);) 451 } 452 453 Arena::Arena() { 454 _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size); 455 _hwm = _chunk->bottom(); // Save the cached hwm, max 456 _max = _chunk->top(); 457 set_size_in_bytes(Chunk::init_size); 458 NOT_PRODUCT(Atomic::inc(&_instance_count);) 459 } 460 461 Arena *Arena::move_contents(Arena *copy) { 462 copy->destruct_contents(); 463 copy->_chunk = _chunk; 464 copy->_hwm = _hwm; 465 copy->_max = _max; 466 copy->_first = _first; 467 468 // workaround rare racing condition, which could double count 469 // the arena size by native memory tracking 470 size_t size = size_in_bytes(); 471 set_size_in_bytes(0); 472 copy->set_size_in_bytes(size); 473 // Destroy original arena 474 reset(); 475 return copy; // Return Arena with contents 476 } 477 478 Arena::~Arena() { 479 destruct_contents(); 480 NOT_PRODUCT(Atomic::dec(&_instance_count);) 481 } 482 483 void* Arena::operator new(size_t size) throw() { 484 assert(false, "Use dynamic memory type binding"); 485 return NULL; 486 } 487 488 void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() { 489 assert(false, "Use dynamic memory type binding"); 490 return NULL; 491 } 492 493 // dynamic memory type binding 494 void* Arena::operator new(size_t size, MEMFLAGS flags) throw() { 495 #ifdef ASSERT 496 void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC); 497 if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); 498 return p; 499 #else 500 return (void *) AllocateHeap(size, flags|otArena, CALLER_PC); 501 #endif 502 } 503 504 void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() { 505 #ifdef ASSERT 506 void* p = os::malloc(size, flags|otArena, CALLER_PC); 507 if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); 508 return p; 509 #else 510 return os::malloc(size, flags|otArena, CALLER_PC); 511 #endif 512 } 513 514 void Arena::operator delete(void* p) { 515 FreeHeap(p); 516 } 517 518 // Destroy this arenas contents and reset to empty 519 void Arena::destruct_contents() { 520 if (UseMallocOnly && _first != NULL) { 521 char* end = _first->next() ? _first->top() : _hwm; 522 free_malloced_objects(_first, _first->bottom(), end, _hwm); 523 } 524 // reset size before chop to avoid a rare racing condition 525 // that can have total arena memory exceed total chunk memory 526 set_size_in_bytes(0); 527 _first->chop(); 528 reset(); 529 } 530 531 // This is high traffic method, but many calls actually don't 532 // change the size 533 void Arena::set_size_in_bytes(size_t size) { 534 if (_size_in_bytes != size) { 535 _size_in_bytes = size; 536 MemTracker::record_arena_size((address)this, size); 537 } 538 } 539 540 // Total of all Chunks in arena 541 size_t Arena::used() const { 542 size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk 543 register Chunk *k = _first; 544 while( k != _chunk) { // Whilst have Chunks in a row 545 sum += k->length(); // Total size of this Chunk 546 k = k->next(); // Bump along to next Chunk 547 } 548 return sum; // Return total consumed space. 549 } 550 551 void Arena::signal_out_of_memory(size_t sz, const char* whence) const { 552 vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence); 553 } 554 555 // Grow a new Chunk 556 void* Arena::grow(size_t x, AllocFailType alloc_failmode) { 557 // Get minimal required size. Either real big, or even bigger for giant objs 558 size_t len = MAX2(x, (size_t) Chunk::size); 559 560 Chunk *k = _chunk; // Get filled-up chunk address 561 _chunk = new (alloc_failmode, len) Chunk(len); 562 563 if (_chunk == NULL) { 564 return NULL; 565 } 566 if (k) k->set_next(_chunk); // Append new chunk to end of linked list 567 else _first = _chunk; 568 _hwm = _chunk->bottom(); // Save the cached hwm, max 569 _max = _chunk->top(); 570 set_size_in_bytes(size_in_bytes() + len); 571 void* result = _hwm; 572 _hwm += x; 573 return result; 574 } 575 576 577 578 // Reallocate storage in Arena. 579 void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) { 580 assert(new_size >= 0, "bad size"); 581 if (new_size == 0) return NULL; 582 #ifdef ASSERT 583 if (UseMallocOnly) { 584 // always allocate a new object (otherwise we'll free this one twice) 585 char* copy = (char*)Amalloc(new_size, alloc_failmode); 586 if (copy == NULL) { 587 return NULL; 588 } 589 size_t n = MIN2(old_size, new_size); 590 if (n > 0) memcpy(copy, old_ptr, n); 591 Afree(old_ptr,old_size); // Mostly done to keep stats accurate 592 return copy; 593 } 594 #endif 595 char *c_old = (char*)old_ptr; // Handy name 596 // Stupid fast special case 597 if( new_size <= old_size ) { // Shrink in-place 598 if( c_old+old_size == _hwm) // Attempt to free the excess bytes 599 _hwm = c_old+new_size; // Adjust hwm 600 return c_old; 601 } 602 603 // make sure that new_size is legal 604 size_t corrected_new_size = ARENA_ALIGN(new_size); 605 606 // See if we can resize in-place 607 if( (c_old+old_size == _hwm) && // Adjusting recent thing 608 (c_old+corrected_new_size <= _max) ) { // Still fits where it sits 609 _hwm = c_old+corrected_new_size; // Adjust hwm 610 return c_old; // Return old pointer 611 } 612 613 // Oops, got to relocate guts 614 void *new_ptr = Amalloc(new_size, alloc_failmode); 615 if (new_ptr == NULL) { 616 return NULL; 617 } 618 memcpy( new_ptr, c_old, old_size ); 619 Afree(c_old,old_size); // Mostly done to keep stats accurate 620 return new_ptr; 621 } 622 623 624 // Determine if pointer belongs to this Arena or not. 625 bool Arena::contains( const void *ptr ) const { 626 #ifdef ASSERT 627 if (UseMallocOnly) { 628 // really slow, but not easy to make fast 629 if (_chunk == NULL) return false; 630 char** bottom = (char**)_chunk->bottom(); 631 for (char** p = (char**)_hwm - 1; p >= bottom; p--) { 632 if (*p == ptr) return true; 633 } 634 for (Chunk *c = _first; c != NULL; c = c->next()) { 635 if (c == _chunk) continue; // current chunk has been processed 636 char** bottom = (char**)c->bottom(); 637 for (char** p = (char**)c->top() - 1; p >= bottom; p--) { 638 if (*p == ptr) return true; 639 } 640 } 641 return false; 642 } 643 #endif 644 if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm ) 645 return true; // Check for in this chunk 646 for (Chunk *c = _first; c; c = c->next()) { 647 if (c == _chunk) continue; // current chunk has been processed 648 if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) { 649 return true; // Check for every chunk in Arena 650 } 651 } 652 return false; // Not in any Chunk, so not in Arena 653 } 654 655 656 #ifdef ASSERT 657 void* Arena::malloc(size_t size) { 658 assert(UseMallocOnly, "shouldn't call"); 659 // use malloc, but save pointer in res. area for later freeing 660 char** save = (char**)internal_malloc_4(sizeof(char*)); 661 return (*save = (char*)os::malloc(size, mtChunk)); 662 } 663 664 // for debugging with UseMallocOnly 665 void* Arena::internal_malloc_4(size_t x) { 666 assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); 667 check_for_overflow(x, "Arena::internal_malloc_4"); 668 if (_hwm + x > _max) { 669 return grow(x); 670 } else { 671 char *old = _hwm; 672 _hwm += x; 673 return old; 674 } 675 } 676 #endif 677 678 679 //-------------------------------------------------------------------------------------- 680 // Non-product code 681 682 #ifndef PRODUCT 683 // The global operator new should never be called since it will usually indicate 684 // a memory leak. Use CHeapObj as the base class of such objects to make it explicit 685 // that they're allocated on the C heap. 686 // Commented out in product version to avoid conflicts with third-party C++ native code. 687 // On certain platforms, such as Mac OS X (Darwin), in debug version, new is being called 688 // from jdk source and causing data corruption. Such as 689 // Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair 690 // define ALLOW_OPERATOR_NEW_USAGE for platform on which global operator new allowed. 691 // 692 #ifndef ALLOW_OPERATOR_NEW_USAGE 693 void* operator new(size_t size) throw() { 694 assert(false, "Should not call global operator new"); 695 return 0; 696 } 697 698 void* operator new [](size_t size) throw() { 699 assert(false, "Should not call global operator new[]"); 700 return 0; 701 } 702 703 void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { 704 assert(false, "Should not call global operator new"); 705 return 0; 706 } 707 708 void* operator new [](size_t size, std::nothrow_t& nothrow_constant) throw() { 709 assert(false, "Should not call global operator new[]"); 710 return 0; 711 } 712 713 void operator delete(void* p) { 714 assert(false, "Should not call global delete"); 715 } 716 717 void operator delete [](void* p) { 718 assert(false, "Should not call global delete []"); 719 } 720 #endif // ALLOW_OPERATOR_NEW_USAGE 721 722 void AllocatedObj::print() const { print_on(tty); } 723 void AllocatedObj::print_value() const { print_value_on(tty); } 724 725 void AllocatedObj::print_on(outputStream* st) const { 726 st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this); 727 } 728 729 void AllocatedObj::print_value_on(outputStream* st) const { 730 st->print("AllocatedObj(" INTPTR_FORMAT ")", this); 731 } 732 733 julong Arena::_bytes_allocated = 0; 734 735 void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); } 736 737 AllocStats::AllocStats() { 738 start_mallocs = os::num_mallocs; 739 start_frees = os::num_frees; 740 start_malloc_bytes = os::alloc_bytes; 741 start_mfree_bytes = os::free_bytes; 742 start_res_bytes = Arena::_bytes_allocated; 743 } 744 745 julong AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; } 746 julong AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; } 747 julong AllocStats::num_frees() { return os::num_frees - start_frees; } 748 julong AllocStats::free_bytes() { return os::free_bytes - start_mfree_bytes; } 749 julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; } 750 void AllocStats::print() { 751 tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), " 752 UINT64_FORMAT" frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc", 753 num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M); 754 } 755 756 757 // debugging code 758 inline void Arena::free_all(char** start, char** end) { 759 for (char** p = start; p < end; p++) if (*p) os::free(*p); 760 } 761 762 void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) { 763 assert(UseMallocOnly, "should not call"); 764 // free all objects malloced since resource mark was created; resource area 765 // contains their addresses 766 if (chunk->next()) { 767 // this chunk is full, and some others too 768 for (Chunk* c = chunk->next(); c != NULL; c = c->next()) { 769 char* top = c->top(); 770 if (c->next() == NULL) { 771 top = hwm2; // last junk is only used up to hwm2 772 assert(c->contains(hwm2), "bad hwm2"); 773 } 774 free_all((char**)c->bottom(), (char**)top); 775 } 776 assert(chunk->contains(hwm), "bad hwm"); 777 assert(chunk->contains(max), "bad max"); 778 free_all((char**)hwm, (char**)max); 779 } else { 780 // this chunk was partially used 781 assert(chunk->contains(hwm), "bad hwm"); 782 assert(chunk->contains(hwm2), "bad hwm2"); 783 free_all((char**)hwm, (char**)hwm2); 784 } 785 } 786 787 788 ReallocMark::ReallocMark() { 789 #ifdef ASSERT 790 Thread *thread = ThreadLocalStorage::get_thread_slow(); 791 _nesting = thread->resource_area()->nesting(); 792 #endif 793 } 794 795 void ReallocMark::check() { 796 #ifdef ASSERT 797 if (_nesting != Thread::current()->resource_area()->nesting()) { 798 fatal("allocation bug: array could grow within nested ResourceMark"); 799 } 800 #endif 801 } 802 803 #endif // Non-product