/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

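// The summary below records the heap reservation as three addresses: the start
// of the reservation, the end of the committed (capacity) portion, and the end
// of the reserved region.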
"before" : "after", 73 heap->total_collections(), 74 heap->total_full_collections()); 75 76 heap->print_on(&st); 77 st.print_cr("}"); 78 } 79 80 VirtualSpaceSummary CollectedHeap::create_heap_space_summary() { 81 size_t capacity_in_words = capacity() / HeapWordSize; 82 83 return VirtualSpaceSummary( 84 reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end()); 85 } 86 87 GCHeapSummary CollectedHeap::create_heap_summary() { 88 VirtualSpaceSummary heap_space = create_heap_space_summary(); 89 return GCHeapSummary(heap_space, used()); 90 } 91 92 MetaspaceSummary CollectedHeap::create_metaspace_summary() { 93 const MetaspaceSizes meta_space( 94 MetaspaceAux::committed_bytes(), 95 MetaspaceAux::used_bytes(), 96 MetaspaceAux::reserved_bytes()); 97 const MetaspaceSizes data_space( 98 MetaspaceAux::committed_bytes(Metaspace::NonClassType), 99 MetaspaceAux::used_bytes(Metaspace::NonClassType), 100 MetaspaceAux::reserved_bytes(Metaspace::NonClassType)); 101 const MetaspaceSizes class_space( 102 MetaspaceAux::committed_bytes(Metaspace::ClassType), 103 MetaspaceAux::used_bytes(Metaspace::ClassType), 104 MetaspaceAux::reserved_bytes(Metaspace::ClassType)); 105 106 const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary = 107 MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType); 108 const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary = 109 MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType); 110 111 return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space, 112 ms_chunk_free_list_summary, class_chunk_free_list_summary); 113 } 114 115 void CollectedHeap::print_heap_before_gc() { 116 Universe::print_heap_before_gc(); 117 if (_gc_heap_log != NULL) { 118 _gc_heap_log->log_heap_before(this); 119 } 120 } 121 122 void CollectedHeap::print_heap_after_gc() { 123 Universe::print_heap_after_gc(); 124 if (_gc_heap_log != NULL) { 125 _gc_heap_log->log_heap_after(this); 126 } 127 } 128 129 void CollectedHeap::print_on_error(outputStream* st) const { 130 st->print_cr("Heap:"); 131 print_extended_on(st); 132 st->cr(); 133 134 _barrier_set->print_on(st); 135 } 136 137 void CollectedHeap::register_nmethod(nmethod* nm) { 138 assert_locked_or_safepoint(CodeCache_lock); 139 } 140 141 void CollectedHeap::unregister_nmethod(nmethod* nm) { 142 assert_locked_or_safepoint(CodeCache_lock); 143 } 144 145 void CollectedHeap::pin_object(oop o) { 146 // Defaults to no-op 147 } 148 149 void CollectedHeap::unpin_object(oop o) { 150 // Defaults to no-op 151 } 152 153 void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) { 154 const GCHeapSummary& heap_summary = create_heap_summary(); 155 gc_tracer->report_gc_heap_summary(when, heap_summary); 156 157 const MetaspaceSummary& metaspace_summary = create_metaspace_summary(); 158 gc_tracer->report_metaspace_summary(when, metaspace_summary); 159 } 160 161 void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) { 162 trace_heap(GCWhen::BeforeGC, gc_tracer); 163 } 164 165 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) { 166 trace_heap(GCWhen::AfterGC, gc_tracer); 167 } 168 169 // Memory state functions. 170 171 172 CollectedHeap::CollectedHeap() : 173 _barrier_set(NULL), 174 _is_gc_active(false), 175 _total_collections(0), 176 _total_full_collections(0), 177 _gc_cause(GCCause::_no_gc), 178 _gc_lastcause(GCCause::_no_gc), 179 _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below. 
CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc),
  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#if defined(COMPILER2) || INCLUDE_JVMCI
  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return Universe::heap()->tlab_post_allocation_setup(obj);
}

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

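// Illustrative arithmetic for max_tlab_size() below (assuming a 64-bit VM
// where HeapWordSize == 8 and sizeof(jint) == 4): (max_jint / 8) * 4 ==
// 1073741820 payload words, a few words short of a true int[Integer.MAX_VALUE]
// because the divide is done before the multiply to avoid overflow.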
size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_size_down(max_int_size, MinObjAlignment);
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj) ||
      new_obj->is_typeArray()) {
    // Arrays of non-references don't need a pre-barrier.
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}

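// Filler support: unused heap ranges are made parsable by overwriting them
// with dummy objects -- an int array for any gap of at least
// filler_array_min_size() words, or a bare java.lang.Object for the smallest
// possible gap (see fill_with_object_impl() below).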
size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

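// Note on the carving policy in the loop below: if taking a full max-sized
// chunk would leave a remainder smaller than min words (too small to fill),
// we take max - min words instead, so the remainder grows to between min and
// 2 * min words and the final fill_with_object_impl() call can handle it.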
void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers should be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap "
         "unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#if defined(COMPILER2) || INCLUDE_JVMCI
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

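// Emits diagnostic dumps around a full collection: a heap dump when
// HeapDumpBeforeFullGC / HeapDumpAfterFullGC is set, and a class histogram
// when trace-level logging is enabled for the gc+classhisto tags.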
void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  Log(gc, classhisto) log;
  if (log.is_trace()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    VM_GC_HeapInspection inspector(log.trace_stream(), false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

HeapWord* CollectedHeap::tlab_post_allocation_setup(HeapWord* obj) {
  return obj;
}

uint CollectedHeap::oop_extra_words() {
  // Default implementation doesn't need extra space for oops.
  return 0;
}

void CollectedHeap::accumulate_statistics_all_gclabs() {
  // Default implementation does nothing.
}

void CollectedHeap::deflate_idle_monitors_all_threads() {
  ObjectSynchronizer::deflate_idle_monitors_all_threads();
}

class DeflateIdleMonitorsThreadClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    ObjectSynchronizer::deflate_idle_monitors_and_oops_do(thread, NULL);
  }
};

void CollectedHeap::parallel_deflate_idle_monitors(WorkGang* workers) {
  // Name the scope so it lives for the duration of the parallel iteration;
  // an unnamed temporary would be destroyed immediately.
  StrongRootsScope srs(workers->active_workers());
  DeflateIdleMonitorsThreadClosure cl;
  Threads::parallel_threads_do(&cl);
}

#ifndef CC_INTERP
void CollectedHeap::compile_prepare_oop(MacroAssembler* masm, Register obj) {
  // Default implementation does nothing.
}
#endif