/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::committed_bytes(),
      MetaspaceAux::used_bytes(),
      MetaspaceAux::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::committed_bytes(Metaspace::NonClassType),
      MetaspaceAux::used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::committed_bytes(Metaspace::ClassType),
      MetaspaceAux::used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  _barrier_set->print_on(st);
}

void CollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors. These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc),
  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#if defined(COMPILER2) || INCLUDE_JVMCI
  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

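// The two checks below are only active when both CheckMemoryInitialization
// and ZapUnusedHeapArea are set: storage just handed back to a caller must
// no longer contain badHeapWordVal (post-allocation check), while storage
// about to be handed out must still be filled with it (pre-allocation check).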
#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) {
  // We can come here for three reasons:
  //  - We really did fill the tlab.
  //  - We pretended to everyone that we did and we want to sample.
  //  - Both of the above are true at the same time.
  if (HeapMonitoring::enabled()) {
    if (thread->tlab().should_sample()) {
      HeapWord *end = thread->tlab().end();
      thread->tlab().set_back_actual_end();

      // If we don't have an object yet, try to allocate it.
      if (obj == NULL) {
        // The tlab could still have space after this sample.
        obj = thread->tlab().allocate(size);
      }

      // Is the object allocated now?
      // If not, we have to wait for a new TLAB; let the subsequent
      // call to handle_heap_sampling pick the next sample.
      if (obj != NULL) {
        // Object is allocated, sample it now.
        HeapMonitoring::object_alloc_do_sample(thread,
                                               reinterpret_cast<oopDesc*>(obj),
                                               size * HeapWordSize);
        // Pick the next sample in this case, since the allocation succeeded.
        thread->tlab().pick_next_sample(thread->tlab().top() - end);
      }
    }
  }

  return obj;
}

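// Slow-path TLAB allocation. The sampling hook above may already have
// satisfied the request from the current TLAB. Otherwise: if the current
// TLAB still has more free space than its refill waste limit it is kept and
// the caller falls back to a shared-space allocation; if not, the TLAB is
// retired, a new one is allocated, and the requested object is carved out
// of its start.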
HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
  HeapWord* obj = handle_heap_sampling(thread, NULL, size);
  bool should_sample = thread->tlab().should_sample();

  if (obj != NULL) {
    return obj;
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);

  if (should_sample) {
    return handle_heap_sampling(thread, obj, size);
  } else {
    return obj;
  }
}

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(oopDesc::is_oop(old_obj, true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that in the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

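// A worked example of the computation in max_tlab_size() above, assuming a
// 64-bit VM where HeapWordSize == 8:
//   (juint) max_jint / 8 = 268435455, times sizeof(jint) = 1073741820 words
// of payload, only a few words short of the exact
// (sizeof(jint) * max_jint) / 8 = 1073741823. Dividing first therefore just
// makes the TLAB limit slightly smaller than the largest int array.
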
// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the MemRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, the protocol for maintaining the intended invariants turns
//     out, serendipitously, to be the same for both G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj) ||
      new_obj->is_typeArray()) {
    // Arrays of non-references don't need a pre-barrier.
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}

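// The filler helpers below overwrite otherwise-dead ranges of the heap with
// parseable objects: an int array when the range is at least
// filler_array_min_size() words, or a bare java.lang.Object when it is
// exactly min_fill_size() words.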
size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

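// Note on the loop in fill_with_objects() above: when the range exceeds the
// maximum filler array size by less than min_fill_size(), carving off a full
// max-sized array would leave a remainder too small to fill, so the loop
// carves off max - min words instead, keeping the remainder fillable.
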
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#if defined(COMPILER2) || INCLUDE_JVMCI
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

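// pre_full_gc_dump() and post_full_gc_dump() below are the before/after
// entry points into full_gc_dump(): a heap dump is only written when the
// matching HeapDumpBeforeFullGC / HeapDumpAfterFullGC flag is set, and the
// class histogram is only produced when gc+classhisto trace logging is
// enabled.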
void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}