/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/allocTracer.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
#include "services/heapDumper.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

const char* CollectedHeap::OverflowMessage
  = "The size of the object heap + perm gen exceeds the maximum representable size";

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());
  if (before) {
    Universe::print_heap_before_gc(&st, true);
  } else {
    Universe::print_heap_after_gc(&st, true);
  }
}
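
// A VirtualSpaceSummary describes the reserved region as three addresses:
// the start of the space, the end of its committed portion (computed here
// as start + current capacity), and the end of the reservation.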
VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

PermGenSummary CollectedHeap::create_perm_gen_summary() {
  VirtualSpaceSummary perm_space = create_perm_gen_space_summary();
  SpaceSummary object_space(perm_space.start(), perm_space.committed_end(), permanent_used());

  return PermGenSummary(perm_space, object_space);
}

void CollectedHeap::print_heap_before_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before();
  }
}

void CollectedHeap::print_heap_after_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after();
  }
}

void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  const PermGenSummary& perm_summary = create_perm_gen_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary, perm_summary);
}

void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Memory state functions.


CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}
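
// Overflow-checked size arithmetic used while sizing the heap during VM
// initialization. Unsigned addition wraps on overflow, so the sum is smaller
// than either operand exactly when a wrap occurred; for example, with a
// 64-bit size_t:
//
//   size_t total  = SIZE_MAX - 8;
//   size_t result = total + 16;   // wraps around to 7
//   // result < 16, so the overflow is detected and the VM exits.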
size_t CollectedHeap::add_and_check_overflow(size_t total, size_t size) {
  assert(size >= 0, "must be");
  size_t result = total + size;
  if (result < size) {
    // We must have overflowed
    vm_exit_during_initialization(CollectedHeap::OverflowMessage);
  }
  return result;
}

size_t CollectedHeap::round_up_and_check_overflow(size_t total, size_t size) {
  assert(size >= 0, "must be");
  size_t result = round_to(total, size);
  if (result < size) {
    // We must have overflowed
    vm_exit_during_initialization(CollectedHeap::OverflowMessage);
  }
  return result;
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument.
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
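
// Flush a card mark that was deferred by new_store_pre_barrier() (see
// below): now that the object is known to be fully initialized, apply the
// write barrier to its region and clear the per-thread deferred MemRegion.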
void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(!is_in_permanent(old_obj), "Sanity: not expected");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(old_obj->is_parsable(), "Will not be concurrently parsable");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care whether the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the MemRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress, an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs an RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus the protocol for maintaining the intended invariants turns
//     out, serendipitously, to be the same for both G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}
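
// Filler objects keep the heap parsable: ranges the collector cannot use for
// allocation are formatted as dummy objects -- an int array when the range is
// large enough to hold an array header, or a plain java.lang.Object for a
// minimum-size gap -- so that heap walkers can step from object to object.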
size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0xDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}
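
// Fill a range that may be too large for a single filler object. Each
// iteration of the loop below carves off at most filler_array_max_size()
// words, but never leaves a tail smaller than min_fill_size(): when the
// remainder after a maximal array would be too small, a slightly smaller
// array of (max - min) words is carved off instead, keeping the tail
// fillable.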
void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill. The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be careful that they know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
    // The deferred store barriers must all have been flushed to the
    // card-table (or other remembered set structure) before GC starts
    // processing the card-table (or other remembered set).
    if (deferred) flush_deferred_store_barrier(thread);
#else
    assert(!deferred, "Should be false");
    assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  if (HeapDumpBeforeFullGC) {
    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
    // We are doing a "major" collection, and a heap dump before
    // the major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  if (HeapDumpAfterFullGC) {
    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}
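
// Allocate the java.lang.Class mirror for a klass. With JavaObjectsInPerm
// the mirror is placed in the permanent generation; otherwise it lives in
// the ordinary object heap and must be reachable as a scavengeable root
// from compiled code (hence the ScavengeRootsInCode assert below).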
oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj;
  if (JavaObjectsInPerm) {
    obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
  } else {
    assert(ScavengeRootsInCode > 0, "must be");
    obj = common_mem_allocate_init(real_klass, size, CHECK_NULL);
  }
  post_allocation_setup_common(klass, obj);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  oop mirror = (oop)obj;

  java_lang_Class::set_oop_size(mirror, size);

  // Set up indirections
  if (!real_klass.is_null()) {
    java_lang_Class::set_klass(mirror, real_klass());
    real_klass->set_java_mirror(mirror);
  }

  instanceMirrorKlass* mk = instanceMirrorKlass::cast(mirror->klass());
  assert(size == mk->instance_size(real_klass), "should have been set");

  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj);

  return mirror;
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
  void* before_heap = (void*)(heap_start - epsilon);
  assert(!heap->is_in(before_heap),
         err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", before_heap));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
  void* after_heap = (void*)(heap_end + epsilon);
  assert(!heap->is_in(after_heap),
         err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", after_heap));
}
#endif