/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/allocTracer.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());
  if (before) {
    Universe::print_heap_before_gc(&st, true);
  } else {
    Universe::print_heap_after_gc(&st, true);
  }
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::allocated_capacity_bytes(),
      MetaspaceAux::allocated_used_bytes(),
      MetaspaceAux::reserved_in_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
      MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
      MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_in_bytes(Metaspace::ClassType));

  return MetaspaceSummary(meta_space, data_space, class_space);
}

void CollectedHeap::print_heap_before_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before();
  }
}

void CollectedHeap::print_heap_after_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after();
  }
}

void CollectedHeap::register_nmethod(nmethod* nm) {
  assert(SafepointSynchronize::is_at_safepoint() || CodeCache_lock->is_locked(),
         err_msg("Must be at safepoint or code cache locked (code cache locked: %d)", CodeCache_lock->is_locked()));
}

void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert(SafepointSynchronize::is_at_safepoint() || CodeCache_lock->is_locked(),
         err_msg("Must be at safepoint or code cache locked (code cache locked: %d)", CodeCache_lock->is_locked()));
}

void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Memory state functions.
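
// Note: the constructor below computes _filler_array_max_size from the
// maximum int[] length; it bounds the size of a single filler object,
// and fill_with_objects() tiles larger ranges with several such arrays.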
CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_last_ditch_collection: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(
                                             ClassLoaderData* loader_data,
                                             size_t size, Metaspace::MetadataType mdtype) {
  return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype);
}


void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
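  // Deferral is enabled only when ReduceInitialCardMarks is on, the
  // compiler can elide TLAB store barriers, and either DeferInitialCardMark
  // is requested or the collector requires card marks to strictly follow
  // the initializing stores (see new_store_pre_barrier() below).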
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
// Thus, using the same protocol for maintaining the intended
// invariants turns out, serendipitously, to be the same for both
// G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers should make sure that mutators aren't going
  // to interfere -- for instance, this is permissible if we are
  // still single-threaded and have either not yet started
  // allocating (nothing much to verify) or we have started
  // allocating but are now a full-fledged JavaThread
  // (and have thus made our TLAB's available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
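     // flush_deferred_store_barrier() applies any card mark recorded in the
     // thread's deferred_card_mark region, so the remembered set is complete
     // before the collection begins.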
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  if (HeapDumpBeforeFullGC) {
    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
    // We are doing a "major" collection and a heap dump before
    // the major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  if (HeapDumpAfterFullGC) {
    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
    inspector.doit();
  }
}

oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj;
  assert(ScavengeRootsInCode > 0, "must be");
  obj = common_mem_allocate_init(real_klass, size, CHECK_NULL);
  post_allocation_setup_common(klass, obj);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->is_array(), "must not be an array");
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  oop mirror = (oop)obj;

  java_lang_Class::set_oop_size(mirror, size);

  // Setup indirections
  if (!real_klass.is_null()) {
    java_lang_Class::set_klass(mirror, real_klass());
    real_klass->set_java_mirror(mirror);
  }

  InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
  assert(size == mk->instance_size(real_klass), "should have been set");

  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj);

  return mirror;
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
  void* before_heap = (void*)(heap_start - epsilon);
  assert(!heap->is_in(before_heap),
         err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", before_heap));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
  void* after_heap = (void*)(heap_end + epsilon);
  assert(!heap->is_in(after_heap),
         err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", after_heap));
}
#endif