1 #ifdef USE_PRAGMA_IDENT_HDR 2 #pragma ident "@(#)collectedHeap.inline.hpp 1.50 07/09/07 10:56:50 JVM" 3 #endif 4 /* 5 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. 6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 7 * 8 * This code is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License version 2 only, as 10 * published by the Free Software Foundation. 11 * 12 * This code is distributed in the hope that it will be useful, but WITHOUT 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 15 * version 2 for more details (a copy is included in the LICENSE file that 16 * accompanied this code). 17 * 18 * You should have received a copy of the GNU General Public License version 19 * 2 along with this work; if not, write to the Free Software Foundation, 20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 21 * 22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 23 * CA 95054 USA or visit www.sun.com if you need additional information or 24 * have any questions. 25 * 26 */ 27 28 // Inline allocation implementations. 
// Object setup is split into two phases so heap walkers never see a
// klass pointer on an object whose mark word is not yet valid:
// phase 1 installs the mark word, phase 2 installs the klass and posts
// allocation events.
void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
                                                 HeapWord* obj,
                                                 size_t size) {
  post_allocation_setup_no_klass_install(klass, obj, size);
  post_allocation_install_obj_klass(klass, oop(obj), (int) size);
}

// Phase 1: install only the mark word.  The object is not yet parseable
// by the heap (no klass); callers must complete setup with
// post_allocation_install_obj_klass before the object is published.
void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
                                                           HeapWord* objPtr,
                                                           size_t size) {

  oop obj = (oop)objPtr;

  assert(obj != NULL, "NULL object pointer");
  if (UseBiasedLocking && (klass() != NULL)) {
    // Biased locking is on and we have a real klass: seed the mark word
    // with the klass's prototype header so the object can be biased.
    obj->set_mark(klass->prototype_header());
  } else {
    // May be bootstrapping (klass may be NULL before the well-known
    // klasses exist); fall back to the neutral prototype mark.
    obj->set_mark(markOopDesc::prototype());
  }

  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();
}

// Phase 2: install the klass pointer, then post JVMTI VMObjectAlloc and
// DTrace allocation events.  A NULL klass is tolerated only while the
// universe is not fully initialized (bootstrapping).
void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
                                                      oop obj,
                                                      int size) {
  // These asserts are kind of complicated because of klassKlass
  // and the beginning of the world.
  assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
  assert(klass() == NULL || klass()->is_klass(), "not a klass");
  assert(klass() == NULL || klass()->klass_part() != NULL, "not a klass");
  assert(obj != NULL, "NULL object pointer");
  obj->set_klass(klass());
  assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
         "missing blueprint");

  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj);

  if (DTraceAllocProbes) {
    // support for Dtrace object alloc event (no-op most of the time)
    // NOTE(review): the name() != NULL guard presumably skips klasses
    // created before symbols exist during bootstrap — confirm.
    if (klass() != NULL && klass()->klass_part()->name() != NULL) {
      SharedRuntime::dtrace_object_alloc(obj);
    }
  }
}

// Complete setup for a non-array object.
void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
                                              HeapWord* obj,
                                              size_t size) {
  post_allocation_setup_common(klass, obj, size);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
}

// Complete setup for an array object of the given element count.
void
CollectedHeap::post_allocation_setup_array(KlassHandle klass,
                                           HeapWord* obj,
                                           size_t size,
                                           int length) {
  // Set array length before posting jvmti object alloc event
  // in post_allocation_setup_common()
  assert(length >= 0, "length should be non-negative");
  ((arrayOop)obj)->set_length(length);
  post_allocation_setup_common(klass, obj, size);
  assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
}

// Allocate "size" heap words from the young/old heap without initializing
// the body.  Fast path is the current thread's TLAB; slow path goes to the
// shared heap and may trigger GC.  On failure, throws OutOfMemoryError
// ("Java heap space" or "GC overhead limit exceeded") via TRAPS and
// returns NULL.
HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {

  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take out a lock if from tlab, so clear here.
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)

  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_0 too
  }

  // We may want to update this, is_noref objects might not be allocated in TLABs.
  HeapWord* result = NULL;
  if (UseTLAB) {
    result = CollectedHeap::allocate_from_tlab(THREAD, size);
    if (result != NULL) {
      assert(!HAS_PENDING_EXCEPTION,
             "Unexpected exception, will result in uninitialized storage");
      return result;
    }
  }
  bool gc_overhead_limit_was_exceeded;
  result = Universe::heap()->mem_allocate(size,
                                          is_noref,
                                          false,
                                          &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    return result;
  }

  // Allocation failed: report and throw the flavor of OOM that matches
  // whether the GC-overhead limit policy fired.
  if (!gc_overhead_limit_was_exceeded) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("Java heap space");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "Java heap space");
    }

    THROW_OOP_0(Universe::out_of_memory_error_java_heap());
  } else {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("GC overhead limit exceeded");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "GC overhead limit exceeded");
    }

    THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
  }
}

// As common_mem_allocate_noinit, but zero-fills the body past the header.
HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, bool is_noref, TRAPS) {
  HeapWord* obj = common_mem_allocate_noinit(size, is_noref, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}

// Allocate "size" heap words from the permanent generation without
// initializing the body.  Throws OutOfMemoryError ("PermGen space") on
// failure.
// Need to investigate, do we really want to throw OOM exception here?
HeapWord* CollectedHeap::common_permanent_mem_allocate_noinit(size_t size, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_NULL too
  }

#ifdef ASSERT
  // Test hook: after CIFireOOMAt allocations on a compiler thread,
  // deliberately throw a perm-gen OOM to exercise the CI's OOM handling.
  if (CIFireOOMAt > 0 && THREAD->is_Compiler_thread() &&
      ++_fire_out_of_memory_count >= CIFireOOMAt) {
    // For testing of OOM handling in the CI throw an OOM and see how
    // it does.  Historically improper handling of these has resulted
    // in crashes which we really don't want to have in the CI.
    THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
  }
#endif

  HeapWord* result = Universe::heap()->permanent_mem_allocate(size);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    return result;
  }
  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  report_java_out_of_memory("PermGen space");

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
      JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
      "PermGen space");
  }

  THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
}

// As common_permanent_mem_allocate_noinit, but zero-fills the body.
HeapWord* CollectedHeap::common_permanent_mem_allocate_init(size_t size, TRAPS) {
  HeapWord* obj = common_permanent_mem_allocate_noinit(size, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}

// TLAB allocation: bump-pointer allocate from the thread's TLAB; on a
// miss, fall through to the slow path (which may refill the TLAB).
HeapWord* CollectedHeap::allocate_from_tlab(Thread* thread, size_t size) {
  assert(UseTLAB, "should use UseTLAB");

  HeapWord* obj = thread->tlab().allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_tlab_slow(thread, size);
}

// Zero the body of a freshly allocated object, leaving the header words
// (mark + klass) untouched.
void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
  assert(obj != NULL, "cannot initialize NULL object");
  const size_t hs = oopDesc::header_size();
  assert(size >= hs, "unexpected object size");
  Copy::fill_to_aligned_words(obj + hs, size - hs);
}

// Public entry: allocate and fully initialize a non-array object of
// "size" heap words.  Throws OOM via TRAPS on failure.
oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
  post_allocation_setup_obj(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Public entry: allocate and fully initialize an array object.
oop CollectedHeap::array_allocate(KlassHandle klass,
                                  int size,
                                  int length,
                                  TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
  post_allocation_setup_array(klass, obj, size, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Public entry: allocate a large type array.  Identical to
// array_allocate except is_noref=true is passed to the allocator
// (the array contains no references).
oop CollectedHeap::large_typearray_allocate(KlassHandle klass,
                                            int size,
                                            int length,
                                            TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(size, true, CHECK_NULL);
  post_allocation_setup_array(klass, obj, size, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Public entry: allocate a fully initialized non-array object in the
// permanent generation.
oop CollectedHeap::permanent_obj_allocate(KlassHandle klass, int size, TRAPS) {
  oop obj = permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
  post_allocation_install_obj_klass(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj,
                                                              size));
  return obj;
}

// Permanent-generation allocation with only phase-1 setup performed; the
// caller is responsible for installing the klass (used when the klass
// itself is being created).
oop CollectedHeap::permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                           int size,
                                                           TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
  post_allocation_setup_no_klass_install(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Public entry: allocate a fully initialized array in the permanent
// generation.
oop CollectedHeap::permanent_array_allocate(KlassHandle klass,
                                            int size,
                                            int length,
                                            TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
  post_allocation_setup_array(klass, obj, size, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Returns "TRUE" if "p" is a method oop in the
// current heap with high probability. NOTE: The main
// current consumers of this interface are Forte::
// and ThreadProfiler::. In these cases, the
// interpreter frame from which "p" came, may be
// under construction when sampled asynchronously, so
// the clients want to check that it represents a
// valid method before using it. Nonetheless since
// the clients do not typically lock out GC, the
// predicate is_valid_method() is not stable, so
// it is possible that by the time "p" is used, it
// is no longer valid.
// Best-effort predicate: does "p" look like a methodOop?  The checks are
// ordered so each one makes the next safe to perform (see the comment
// block above this function); && short-circuiting is load-bearing here.
inline bool CollectedHeap::is_valid_method(oop p) const {
  return
    p != NULL &&

    // Check whether it is aligned at a HeapWord boundary.
    Space::is_aligned(p) &&

    // Check whether "method" is in the allocated part of the
    // permanent generation -- this needs to be checked before
    // p->klass() below to avoid a SEGV (but see below
    // for a potential window of vulnerability).
    is_permanent((void*)p) &&

    // See if GC is active; however, there is still an
    // apparently unavoidable window after this call
    // and before the client of this interface uses "p".
    // If the client chooses not to lock out GC, then
    // it's a risk the client must accept.
    !is_gc_active() &&

    // Check that p is a methodOop.
    p->klass() == Universe::methodKlassObj();
}


#ifndef PRODUCT

// Support for the -XX:+PromotionFailureALot stress option: once every
// PromotionFailureALotInterval collections, report a simulated promotion
// failure on every PromotionFailureALotCount-th call.
inline bool
CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

// Convenience overload using the heap-wide failure counter.
inline bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

// Re-arm the stress counter: remember the current collection count and
// restart the per-promotion counter at zero.
inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

// Convenience overload using the heap-wide failure counter.
inline void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}
#endif  // #ifndef PRODUCT