/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/allocTracer.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/barrierSet.inline.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
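  // Note: the timestamp recorded below was sampled before acquiring _mutex, above.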
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());
  if (before) {
    Universe::print_heap_before_gc(&st, true);
  } else {
    Universe::print_heap_after_gc(&st, true);
  }
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::committed_bytes(),
      MetaspaceAux::used_bytes(),
      MetaspaceAux::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::committed_bytes(Metaspace::NonClassType),
      MetaspaceAux::used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::committed_bytes(Metaspace::ClassType),
      MetaspaceAux::used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before();
  }
}

void CollectedHeap::print_heap_after_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after();
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  _barrier_set->print_on(st);
}

void CollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Memory state functions.

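// A note on the _filler_array_max_size computation in the constructor below
// (a sketch, assuming a 64-bit VM where HeapWordSize == 8 and sizeof(jint) == 4):
// elements_per_word is then 2, so the payload of the largest possible int[]
// occupies max_len / 2 heap words; adding the aligned array header and rounding
// up to the object alignment gives the largest filler array that
// fill_with_array() can use.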
CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_last_ditch_collection: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ... and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ... and zap the just-allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
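  // As a worked example (assuming a 64-bit VM with HeapWordSize == 8 and
  // sizeof(jint) == 4): max_jint / 8 is ~268M, times 4 gives ~1G heap words
  // of payload, i.e. on the order of 8G of memory, matching the "~8G" note
  // in fill_with_objects() below.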
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_size_down(max_int_size, MinObjAlignment);
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs an RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::post_initialize() {
  collector_policy()->post_heap_initialize();
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers should make sure that mutators aren't going
  // to interfere -- for instance, this is permissible if we are
  // still single-threaded and have either not yet started allocating
  // (nothing much to verify) or we have started allocating but are
  // now a full-fledged JavaThread (and have thus made our TLABs
  // available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  if (HeapDumpBeforeFullGC) {
    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create());
    // We are doing a "major" collection and a heap dump before
    // major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer, GCId::create());
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  if (HeapDumpAfterFullGC) {
    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer, GCId::create());
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer, GCId::create());
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
  void* before_heap = (void*)(heap_start - epsilon);
  assert(!heap->is_in(before_heap),
         err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", p2i(before_heap)));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
  void* after_heap = (void*)(heap_end + epsilon);
  assert(!heap->is_in(after_heap),
         err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", p2i(after_heap)));
}
#endif
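// Note: test_is_in() above is intended to be driven by the internal VM test
// runner in this vintage of HotSpot (e.g. a non-product build run with
// -XX:+ExecuteInternalVMTests); it is compiled out of PRODUCT builds entirely.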