1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

// allocate using malloc; will fail if no memory available
// 'flags' and 'stack' tag the allocation for native memory tracking.
// If the allocation fails and alloc_failmode is EXIT_OOM, the VM is
// terminated with an out-of-memory error; otherwise NULL is returned.
char* AllocateHeap(size_t size,
                   MEMFLAGS flags,
                   const NativeCallStack& stack,
                   AllocFailType alloc_failmode) {
  char* p = (char*) os::malloc(size, flags, stack);
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
  }
  return p;
}

// Convenience overload: records the immediate caller (CALLER_PC) as the
// allocation site.
char* AllocateHeap(size_t size,
                   MEMFLAGS flags) {
  return AllocateHeap(size, flags, CALLER_PC);
}

// nothrow overload: returns NULL on allocation failure instead of
// exiting the VM.
char* AllocateHeap(size_t size,
                   MEMFLAGS flags,
                   const std::nothrow_t& nothrow_constant) {
  return AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
}

// nothrow overload with an explicit allocation-site call stack.
char* AllocateHeap(size_t size,
                   MEMFLAGS flags,
                   const std::nothrow_t& nothrow_constant,
                   const NativeCallStack& stack) {
  return AllocateHeap(size, flags, stack, AllocFailStrategy::RETURN_NULL);
}

// Reallocate 'old' to 'size' bytes via os::realloc; failure handling
// mirrors AllocateHeap (EXIT_OOM aborts the VM, otherwise NULL).
char* ReallocateHeap(char *old,
                     size_t size,
                     MEMFLAGS flag,
                     AllocFailType alloc_failmode) {
  char* p = (char*) os::realloc(old, size, flag, CALLER_PC);
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
  }
  return p;
}

// Release memory previously obtained from AllocateHeap/ReallocateHeap.
void FreeHeap(void* p) {
  os::free(p);
}

// Bounds of the shared metaspace region; NULL until set elsewhere
// (presumably by CDS initialization — not visible in this file).
void* MetaspaceObj::_shared_metaspace_base = NULL;
void* MetaspaceObj::_shared_metaspace_top = NULL;

// StackObj instances may only live on the stack or be embedded in other
// objects; any attempt at dynamic allocation is a programming error.
void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void StackObj::operator delete [](void* p) { ShouldNotCallThis(); }

// Allocate a MetaspaceObj of 'word_size' words from the metaspace owned
// by 'loader_data'.  Note: the C++ byte count 'size' is ignored;
// 'word_size' is authoritative.
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, type, THREAD);
}

// True if this object's address lies within any metaspace region.
bool MetaspaceObj::is_metaspace_object() const {
  return Metaspace::contains((void*)this);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" INTPTR_FORMAT "}", p2i(this));
}

// Arena placement new: memory comes from the arena; in debug builds the
// allocation type is recorded so the destructor/delete can verify it.
void* ResourceObj::operator new(size_t size, Arena *arena) throw() {
  address res = (address)arena->Amalloc(size);
  DEBUG_ONLY(set_allocation_type(res, ARENA);)
  return res;
}

void* ResourceObj::operator new [](size_t size, Arena *arena) throw() {
  address res = (address)arena->Amalloc(size);
  DEBUG_ONLY(set_allocation_type(res, ARENA);)
  return res;
}

// Allocate either on the C heap (exits VM on failure) or in the current
// thread's resource area, depending on 'type'.
void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
  address res = NULL;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
  return (address) operator new(size, type, flags);
}

// nothrow variant of the above: a C_HEAP failure returns NULL rather
// than exiting the VM, so the type is only recorded on success.
void* ResourceObj::operator new(size_t size, const std::nothrow_t&  nothrow_constant,
    allocation_type type, MEMFLAGS flags) throw() {
  // should only call this with std::nothrow, use other operator new() otherwise
  address res = NULL;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size, std::nothrow);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
    allocation_type type, MEMFLAGS flags) throw() {
  return (address)operator new(size, nothrow_constant, type, flags);
}

// delete is only legal for C-heap-allocated ResourceObjs; arena and
// resource-area storage is reclaimed in bulk, never object-by-object.
void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}

#ifdef ASSERT
// Debug-only bookkeeping: the allocation type is folded into the object's
// own address and stored in _allocation_t[0] as ~(address + type).  Because
// the address is aligned, the low 'allocation_mask' bits carry the type and
// the rest lets get_allocation_type() verify the value still matches 'this'.
void ResourceObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least: " INTPTR_FORMAT, p2i(res));
  assert(type <= allocation_mask, "incorrect allocation type");
  ResourceObj* resobj = (ResourceObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new() and CollectionSetChooser(),
    // set verification value.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}

// Recover the type encoded by set_allocation_type(); asserts that the
// encoded address still matches 'this' (i.e. the object was not copied
// or corrupted).
ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

// True if both words of _allocation_t were set by operator new():
// the types agree and _allocation_t[1] holds its own address plus type.
bool ResourceObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

ResourceObj::ResourceObj() { // default constructor
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such type). Keep it since it is valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack(),
           "not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
           p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  } else {
    // Operator new() was not called.
    // Assume that it is embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}

// Copies are always stack/embedded: the address-based encoding cannot be
// copied verbatim, so the type is re-derived for the new location.
ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
  // Note: garbage may resemble a valid value.
  assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
         "embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
         p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  set_allocation_type((address)this, STACK_OR_EMBEDDED);
  _allocation_t[1] = 0; // Zap verification value
}

// Assignment may only target a stack-allocated object; _allocation_t is
// deliberately left untouched since it encodes the target's own address.
ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
  assert(allocated_on_stack(),
         "copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
         p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  // Keep current _allocation_t value;
  return *this;
}

ResourceObj::~ResourceObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT

//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void AllocatedObj::print()       const { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

// Snapshot the global allocation counters at construction; the accessors
// below report deltas since that point.
AllocStats::AllocStats() {
  start_mallocs = os::num_mallocs;
  start_frees = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes = os::free_bytes;
  start_res_bytes = Arena::_bytes_allocated;
}

julong  AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
julong  AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
julong  AllocStats::num_frees() { return os::num_frees - start_frees; }
julong  AllocStats::free_bytes() { return os::free_bytes - start_mfree_bytes; }
julong  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void    AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}

// Captures the current resource-area nesting depth so check() can detect
// an array grown inside a ResourceMark nested deeper than its allocation.
ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = Thread::current();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product