/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#if defined _WINDOWS && _MSC_VER >= 1900
// 'noexcept' used with no exception handling mode specified; termination on exception is not guaranteed. Specify /EHsc
#pragma warning( disable : 4577 )
#endif

#include <new>

#define ARENA_ALIGN_M1   (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x)   ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)


// noinline attribute
#ifdef _WINDOWS
  #define _NOINLINE_  __declspec(noinline)
#else
  #if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
    #define _NOINLINE_
  #else
    #define _NOINLINE_ __attribute__ ((noinline))
  #endif
#endif

class AllocFailStrategy {
public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

// All classes in the virtual machine must be subclassed
// from one of the following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by malloc & free).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
// For classes in Metaspace (class data)
// - MetaspaceObj
//
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that need to avoid allocating
// vtbl entries in their objects should therefore not be printable
// subclasses.
//
// The following macros and functions should be used to allocate memory
// directly in the resource area or in the C-heap. The _OBJ variants
// of the NEW/FREE_C_HEAP macros allocate/deallocate simple objects
// that do not inherit from CHeapObj; note that constructors and
// destructors are not called. The preferred way to allocate objects
// is with the new operator.
//
// WARNING: The array variants must only be used for a homogeneous array
// where all objects are of the exact type specified. If subtypes are
// stored in the array, the caller must take care to call destructors
// as needed.
//
//   NEW_RESOURCE_ARRAY(type, size)
//   NEW_RESOURCE_OBJ(type)
//   NEW_C_HEAP_ARRAY(type, size, memflags)
//   NEW_C_HEAP_OBJ(type, memflags)
//   FREE_C_HEAP_ARRAY(type, old)
//   FREE_C_HEAP_OBJ(objname)
//   char* AllocateHeap(size_t size, MEMFLAGS flags);
//   void  FreeHeap(void* p);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.
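// For illustration, a minimal usage sketch of the C-heap macros (the struct
// Point and its fields below are hypothetical, not part of this file):
//
//   struct Point { int x; int y; };                        // plain struct, not a CHeapObj
//   Point* pts = NEW_C_HEAP_ARRAY(Point, 10, mtInternal);  // raw storage, no constructors run
//   pts[0].x = 1;
//   FREE_C_HEAP_ARRAY(Point, pts);                         // no destructors run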
// Base class for objects allocated in the C-heap.

// In non-product mode we introduce a super class for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word overhead for empty super classes.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif


/*
 * Memory types
 */
enum MemoryType {
  // Memory type by sub systems. It occupies lower byte.
  mtJavaHeap          = 0x00,  // Java heap
  mtClass             = 0x01,  // memory class for Java classes
  mtThread            = 0x02,  // memory for thread objects
  mtThreadStack       = 0x03,
  mtCode              = 0x04,  // memory for generated code
  mtGC                = 0x05,  // memory for GC
  mtCompiler          = 0x06,  // memory for compiler
  mtInternal          = 0x07,  // memory used by VM, but does not belong to
                               // any of above categories, and not used for
                               // native memory tracking
  mtOther             = 0x08,  // memory not used by VM
  mtSymbol            = 0x09,  // symbol
  mtNMT               = 0x0A,  // memory used by native memory tracking
  mtClassShared       = 0x0B,  // class data sharing
  mtChunk             = 0x0C,  // chunk that holds content of arenas
  mtTest              = 0x0D,  // Test type for verifying NMT
  mtTracing           = 0x0E,  // memory used for Tracing
  mtLogging           = 0x0F,  // memory for logging
  mtNone              = 0x10,  // undefined
  mt_number_of_types  = 0x11   // number of memory types (mtDontTrack
                               // is not included as a valid type)
};

typedef MemoryType MEMFLAGS;


#if INCLUDE_NMT

extern bool NMT_track_callsite;

#else

const bool NMT_track_callsite = false;

#endif // INCLUDE_NMT

class NativeCallStack;


template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  _NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new(size_t size) throw();
  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
                                 const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant)
                                 throw();
  _NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new [](size_t size) throw();
  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
                                   const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant)
                                   throw();
  void  operator delete(void* p);
  void  operator delete [] (void* p);
};
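// A minimal sketch of subclassing CHeapObj (class MyTable is hypothetical):
// the template parameter tags every allocation of the class for native
// memory tracking, and its operator new/delete route through the VM's
// heap-allocation wrappers rather than raw malloc/free.
//
//   class MyTable : public CHeapObj<mtInternal> {
//     int _entries;
//   };
//   MyTable* t = new MyTable();   // tracked under mtInternal
//   delete t;                     // released through the VM, not plain free()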
// Base class for objects allocated on the stack only.
// Calling new or delete will result in a fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
 private:
  void* operator new(size_t size) throw();
  void* operator new [](size_t size) throw();
#ifdef __IBMCPP__
 public:
#endif
  void  operator delete(void* p);
  void  operator delete [](void* p);
};

// Base class for objects used as value objects.
// Calling new or delete will result in a fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make classes bigger if they have a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not happening. So never make a ValueObj class a direct subclass
// of this object, but use the VALUE_OBJ_CLASS_SPEC macro instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   }
//
// With gcc and possibly other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as an empty string "".
//
class _ValueObj {
 private:
  void* operator new(size_t size) throw();
  void  operator delete(void* p);
  void* operator new [](size_t size) throw();
  void  operator delete [](void* p);
};


// Base class for objects stored in Metaspace.
// Calling delete will result in a fatal error.
//
// Do not inherit from something with a vptr because this class does
// not introduce one. This class is used to allocate both shared read-only
// and shared read-write classes.
//

class ClassLoaderData;

class MetaspaceObj {
 public:
  bool is_metaspace_object() const;
  bool is_shared() const;
  void print_address_on(outputStream* st) const;  // nonvirtual address printing

#define METASPACE_OBJ_TYPES_DO(f) \
  f(Unknown) \
  f(Class) \
  f(Symbol) \
  f(TypeArrayU1) \
  f(TypeArrayU2) \
  f(TypeArrayU4) \
  f(TypeArrayU8) \
  f(TypeArrayOther) \
  f(Method) \
  f(ConstMethod) \
  f(MethodData) \
  f(ConstantPool) \
  f(ConstantPoolCache) \
  f(Annotation) \
  f(MethodCounters) \
  f(Deallocated)

#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char * type_name(Type type) {
    switch(type) {
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

  static MetaspaceObj::Type array_type(size_t elem_size) {
    switch (elem_size) {
    case 1: return TypeArrayU1Type;
    case 2: return TypeArrayU2Type;
    case 4: return TypeArrayU4Type;
    case 8: return TypeArrayU8Type;
    default:
      return TypeArrayOtherType;
    }
  }

  void* operator new(size_t size, ClassLoaderData* loader_data,
                     size_t word_size, bool read_only,
                     Type type, Thread* thread) throw();
                     // can't use TRAPS from this header file.
  void operator delete(void* p) { ShouldNotCallThis(); }
};
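// For illustration, the X-macro above expands as sketched here (only the
// first entries shown): each f(name) becomes either an enum constant or a
// switch case, so the Type enum and type_name() cannot drift out of sync.
//
//   enum Type { UnknownType, ClassType, SymbolType, /* ... */ _number_of_types };
//   // type_name(ClassType)    returns "Class"
//   // array_type(sizeof(u2))  returns TypeArrayU2Type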
// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};


//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {
  friend class VMStructs;

 protected:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    tiny_size  =  256  - slack, // Size of first chunk (tiny)
    init_size  =  1*K  - slack, // Size of first chunk (normal aka small)
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len;  }
  Chunk* next() const           { return _next; }
  void set_next(Chunk* n)       { _next = n;    }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size(); }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};
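// Worked example of the sizing rule above (assuming a 64-bit build, so
// slack = 40): a chunk of init_size = 1*K - 40 = 984 data bytes plus the
// Chunk header and the C library's own bookkeeping still fits at or under
// the 1K boundary, so a buddy-system malloc does not round the request up
// to the next power of two and waste nearly half the block.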
//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  MEMFLAGS    _flags;           // Memory tracking flags

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  bool check_for_overflow(size_t request, const char* whence,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
        return false;
      }
      signal_out_of_memory(request, whence);
    }
    return true;
  }

 public:
  Arena(MEMFLAGS memflag);
  Arena(MEMFLAGS memflag, size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size) throw();
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
  void  operator delete(void* p);

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert((x & (sizeof(char*) - 1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on SPARC.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert((x & (sizeof(char*) - 1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in arena.  Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }

  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         { return _size_in_bytes; }
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
  static void free_all(char** start, char** end)                                     PRODUCT_RETURN;

private:
  // Reset this Arena to empty; the next allocation will grow a new chunk if necessary
  void reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};
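// A minimal usage sketch of an Arena (the sizes are illustrative): allocation
// is a pointer bump within the current chunk, and Afree only reclaims storage
// when the freed block happens to be the most recently allocated one.
//
//   Arena* a = new (mtInternal) Arena(mtInternal);
//   char* p = (char*) a->Amalloc(64);   // bumps _hwm by ARENA_ALIGN(64)
//   a->Afree(p, 64);                    // p was the last block, so _hwm rewinds
//   delete a;                           // releases all chunks at once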
// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)


//%note allocation_1
extern char* resource_allocate_bytes(size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_allocate_bytes(Thread* thread, size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern void resource_free_bytes( char *old, size_t size );
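// For illustration, the arena macros above expand to typed Amalloc/Arealloc
// calls (the names a and n are hypothetical):
//
//   int* v = NEW_ARENA_ARRAY(a, int, n);          // a->Amalloc(n * sizeof(int))
//   v = REALLOC_ARENA_ARRAY(a, int, v, n, 2*n);   // grows in place or copies
//   FREE_ARENA_ARRAY(a, int, v, 2*n);             // no-op unless v was the last block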
//----------------------------------------------------------------------
// Base class for objects allocated in the resource area by default.
// Optionally, objects may be allocated on the C heap with
// new(ResourceObj::C_HEAP) Foo(...) or in an Arena with new (&arena).
// ResourceObjs can also be allocated within other objects, but don't use
// new or delete in that case (allocation_type is unknown). If new is used
// to allocate, use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
 private:
  // When this object is allocated on the stack the new() operator is not
  // called but garbage on the stack may look like a valid allocation_type.
  // Store the negated 'this' pointer when new() is called to distinguish cases.
  // Use the second array element for a verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
 public:
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
  ResourceObj(); // default constructor
  ResourceObj(const ResourceObj& r); // default copy constructor
  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
  ~ResourceObj();
#endif // ASSERT

 public:
  void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw();
  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t&  nothrow_constant,
      allocation_type type, MEMFLAGS flags) throw();
  void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
      allocation_type type, MEMFLAGS flags) throw();

  void* operator new(size_t size, Arena *arena) throw() {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }

  void* operator new [](size_t size, Arena *arena) throw() {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }

  void* operator new(size_t size) throw() {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new [](size_t size) throw() {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void operator delete(void* p);
  void operator delete [](void* p);
};
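// A minimal sketch of the three explicit placement forms (class Foo and the
// arena below are hypothetical; Foo derives from ResourceObj):
//
//   Foo* r = new Foo();                                    // resource area (the default)
//   Foo* c = new (ResourceObj::C_HEAP, mtInternal) Foo();  // C heap; delete c when done
//   Foo* a = new (&arena) Foo();                           // arena; freed with the arena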
// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap or in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\
                                    (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
  /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
  NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)

#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
  (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)

#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))

#define NEW_C_HEAP_ARRAY(type, size, memflags)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags))

#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)

#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))

#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))

#define FREE_C_HEAP_ARRAY(type, old) \
  FreeHeap((char*)(old))

// allocate type in heap without calling ctor
#define NEW_C_HEAP_OBJ(type, memflags)\
  NEW_C_HEAP_ARRAY(type, 1, memflags)

#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\
  NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)

// deallocate obj of type in heap without calling dtor
#define FREE_C_HEAP_OBJ(objname)\
  FreeHeap((char*)objname);

// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();  // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif


//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer. Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
protected:
  NOT_PRODUCT(int _nesting;)

public:
  ReallocMark()   PRODUCT_RETURN;
  void check()    PRODUCT_RETURN;
};
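// A minimal usage sketch (the buffer and its sizes are hypothetical): the
// mark is declared in the same scope as the pointer it guards, and check()
// is called before any use that assumes the pointer is still valid.
//
//   ReallocMark nesting_guard;
//   char* buf = NEW_RESOURCE_ARRAY(char, 100);
//   buf = REALLOC_RESOURCE_ARRAY(char, buf, 100, 200); // may move the data
//   nesting_guard.check();                             // assert nothing invalidated buf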
// Helper class to allocate arrays that may become large.
// Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit
// and uses mapped memory for larger allocations.
// Most OS mallocs do something similar but Solaris malloc does not revert
// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
// is set so that we always use malloc except for Solaris where we set the
// limit to get mapped memory.
template <class E, MEMFLAGS F>
class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
  char* _addr;
  bool _use_malloc;
  size_t _size;
  bool _free_in_destructor;

  static bool should_use_malloc(size_t size) {
    return size < ArrayAllocatorMallocLimit;
  }

  static char* allocate_inner(size_t& size, bool& use_malloc);
 public:
  ArrayAllocator(bool free_in_destructor = true) :
    _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }

  ~ArrayAllocator() {
    if (_free_in_destructor) {
      free();
    }
  }

  E* allocate(size_t length);
  E* reallocate(size_t new_length);
  void free();
};

#endif // SHARE_VM_MEMORY_ALLOCATION_HPP