/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP

#include "gc_interface/collectedHeap.hpp"
#include "memory/threadLocalAllocBuffer.inline.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/copy.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif

// Inline allocation implementations.

void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
                                                 HeapWord* obj,
                                                 size_t size) {
  post_allocation_setup_no_klass_install(klass, obj, size);
  post_allocation_install_obj_klass(klass, oop(obj), (int) size);
}

void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
                                                           HeapWord* objPtr,
                                                           size_t size) {
  oop obj = (oop)objPtr;

  assert(obj != NULL, "NULL object pointer");
  if (UseBiasedLocking && (klass() != NULL)) {
    obj->set_mark(klass->prototype_header());
  } else {
    // May be bootstrapping
    obj->set_mark(markOopDesc::prototype());
  }
}

void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
                                                      oop obj,
                                                      int size) {
  // These asserts are kind of complicated because of klassKlass
  // and the beginning of the world.
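  // Until Universe::is_fully_initialized() the klass may legitimately be
  // NULL (for example while the first klasses are being bootstrapped), so
  // each check below is guarded on the initialization state.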
  assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
  assert(klass() == NULL || klass()->is_klass(), "not a klass");
  assert(klass() == NULL || klass()->klass_part() != NULL, "not a klass");
  assert(obj != NULL, "NULL object pointer");
  obj->set_klass(klass());
  assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
         "missing blueprint");
}

// Support for jvmti and dtrace
inline void post_allocation_notify(KlassHandle klass, oop obj) {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();

  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj);

  if (DTraceAllocProbes) {
    // support for Dtrace object alloc event (no-op most of the time)
    if (klass() != NULL && klass()->klass_part()->name() != NULL) {
      SharedRuntime::dtrace_object_alloc(obj);
    }
  }
}

void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
                                              HeapWord* obj,
                                              size_t size) {
  post_allocation_setup_common(klass, obj, size);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj);
}

void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
                                                HeapWord* obj,
                                                size_t size,
                                                int length) {
  // Set array length before setting the _klass field
  // in post_allocation_setup_common() because the klass field
  // indicates that the object is parsable by concurrent GC.
  assert(length >= 0, "length should be non-negative");
  ((arrayOop)obj)->set_length(length);
  post_allocation_setup_common(klass, obj, size);
  assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
  // notify jvmti and dtrace (must be after length is set for dtrace)
  post_allocation_notify(klass, (oop)obj);
}

HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) {

  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take out a lock if from tlab, so clear here.
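  // (CHECK_UNHANDLED_OOPS_ONLY expands to nothing unless the build defines
  // CHECK_UNHANDLED_OOPS.)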
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)

  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_0 too
  }

  HeapWord* result = NULL;
  if (UseTLAB) {
    result = CollectedHeap::allocate_from_tlab(THREAD, size);
    if (result != NULL) {
      assert(!HAS_PENDING_EXCEPTION,
             "Unexpected exception, will result in uninitialized storage");
      return result;
    }
  }
  bool gc_overhead_limit_was_exceeded = false;
  result = Universe::heap()->mem_allocate(size,
                                          &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    THREAD->incr_allocated_bytes(size * HeapWordSize);
    return result;
  }

  if (!gc_overhead_limit_was_exceeded) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("Java heap space");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "Java heap space");
    }

    THROW_OOP_0(Universe::out_of_memory_error_java_heap());
  } else {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("GC overhead limit exceeded");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "GC overhead limit exceeded");
    }

    THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
  }
}

HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, TRAPS) {
  HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}

// Need to investigate, do we really want to throw OOM exception here?
HeapWord* CollectedHeap::common_permanent_mem_allocate_noinit(size_t size, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_NULL too
  }

#ifdef ASSERT
  if (CIFireOOMAt > 0 && THREAD->is_Compiler_thread() &&
      ++_fire_out_of_memory_count >= CIFireOOMAt) {
    // For testing of OOM handling in the CI throw an OOM and see how
    // it does.  Historically improper handling of these has resulted
    // in crashes which we really don't want to have in the CI.
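    // Because this block is inside #ifdef ASSERT, the injected OOM can only
    // occur in debug builds, and only on a compiler thread once the counter
    // reaches CIFireOOMAt.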
    THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
  }
#endif

  HeapWord* result = Universe::heap()->permanent_mem_allocate(size);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    return result;
  }
  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  report_java_out_of_memory("PermGen space");

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        "PermGen space");
  }

  THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
}

HeapWord* CollectedHeap::common_permanent_mem_allocate_init(size_t size, TRAPS) {
  HeapWord* obj = common_permanent_mem_allocate_noinit(size, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}

HeapWord* CollectedHeap::allocate_from_tlab(Thread* thread, size_t size) {
  assert(UseTLAB, "should use UseTLAB");

  HeapWord* obj = thread->tlab().allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_tlab_slow(thread, size);
}

void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
  assert(obj != NULL, "cannot initialize NULL object");
  const size_t hs = oopDesc::header_size();
  assert(size >= hs, "unexpected object size");
  ((oop)obj)->set_klass_gap(0);
  Copy::fill_to_aligned_words(obj + hs, size - hs);
}

oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
  post_allocation_setup_obj(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

oop CollectedHeap::array_allocate(KlassHandle klass,
                                  int size,
                                  int length,
                                  TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
  post_allocation_setup_array(klass, obj, size, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

oop CollectedHeap::permanent_obj_allocate(KlassHandle klass, int size, TRAPS) {
  oop obj = permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
  post_allocation_install_obj_klass(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj,
                                                              size));
  return obj;
}

oop CollectedHeap::permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                           int size,
                                                           TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
  post_allocation_setup_no_klass_install(klass, obj, size);
#ifndef PRODUCT
  const size_t hs = oopDesc::header_size();
  Universe::heap()->check_for_bad_heap_word_value(obj+hs, size-hs);
#endif
  return (oop)obj;
}
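// Permanent-generation counterpart of array_allocate() above: same argument
// checks and post-allocation setup, but the storage comes from
// common_permanent_mem_allocate_init() rather than the ordinary heap path.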
oop CollectedHeap::permanent_array_allocate(KlassHandle klass,
                                            int size,
                                            int length,
                                            TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
  post_allocation_setup_array(klass, obj, size, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Returns "TRUE" if "p" is a method oop in the
// current heap with high probability. NOTE: The main
// current consumers of this interface are Forte::
// and ThreadProfiler::. In these cases, the
// interpreter frame from which "p" came, may be
// under construction when sampled asynchronously, so
// the clients want to check that it represents a
// valid method before using it. Nonetheless since
// the clients do not typically lock out GC, the
// predicate is_valid_method() is not stable, so
// it is possible that by the time "p" is used, it
// is no longer valid.
inline bool CollectedHeap::is_valid_method(oop p) const {
  return
    p != NULL &&

    // Check whether it is aligned at a HeapWord boundary.
    Space::is_aligned(p) &&

    // Check whether "method" is in the allocated part of the
    // permanent generation -- this needs to be checked before
    // p->klass() below to avoid a SEGV (but see below
    // for a potential window of vulnerability).
    is_permanent((void*)p) &&

    // See if GC is active; however, there is still an
    // apparently unavoidable window after this call
    // and before the client of this interface uses "p".
    // If the client chooses not to lock out GC, then
    // it's a risk the client must accept.
    !is_gc_active() &&

    // Check that p is a methodOop.
    p->klass() == Universe::methodKlassObj();
}


#ifndef PRODUCT

inline bool
CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

inline bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

inline void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}
#endif // #ifndef PRODUCT

#endif // SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP