/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::committed_bytes(),
      MetaspaceAux::used_bytes(),
      MetaspaceAux::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::committed_bytes(Metaspace::NonClassType),
      MetaspaceAux::used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::committed_bytes(Metaspace::ClassType),
      MetaspaceAux::used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  _barrier_set->print_on(st);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors. These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

// Memory state functions.

CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc),
  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

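// A hedged illustration, not part of the collector logic: a VM operation such
// as a heap dump or heap inspection typically reaches collect_as_vm_thread()
// from its doit() while the VM thread is at a safepoint and the operation's
// prologue already holds the Heap_lock, roughly:
//
//   // inside a VM_GC_Operation subclass, on the VM thread at a safepoint
//   Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
//
// The exact call sites and GCCause values vary by operation and release.
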
void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#if defined(COMPILER2) || INCLUDE_JVMCI
  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument.
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif


void CollectedHeap::sample_allocation(Thread* thread, HeapWord* obj,
                                      size_t size, size_t overflowed_words) {
  // Object is allocated, sample it now.
  HeapMonitoring::object_alloc_do_sample(thread,
                                         reinterpret_cast<oopDesc*>(obj),
                                         size * HeapWordSize);
  // Pick the next sample point; in this case the allocation succeeded.
  thread->tlab().pick_next_sample(overflowed_words);
}

HeapWord* CollectedHeap::allocate_sampled_object(Thread* thread, size_t size) {
  thread->tlab().set_back_actual_end();

  // The tlab could still have space after this sample.
  return thread->tlab().allocate(size);
}

HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
  // In case the tlab changes, remember if this one wanted a sample.
  bool should_sample = HeapMonitoring::enabled() && thread->tlab().should_sample();

  HeapWord* obj = NULL;
  if (should_sample) {
    // Remember the tlab end to fix up the sampling rate.
    HeapWord *tlab_old_end = thread->tlab().end();
    obj = allocate_sampled_object(thread, size);

    // If we did allocate in this tlab, sample it. Otherwise, we wait for the
    // new tlab's first allocation at the end of this method.
    if (obj != NULL) {
      // Fix the sample rate by removing the extra words allocated in this last
      // sample.
      size_t overflowed_words = pointer_delta(thread->tlab().top(), tlab_old_end);
      sample_allocation(thread, obj, size, overflowed_words);
      return obj;
    }
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);

  // Did we initially want to sample?
  if (should_sample) {
    sample_allocation(thread, obj, size);
  }
  return obj;
}

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(oopDesc::is_oop(old_obj, true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

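// Worked example for the divide-before-multiply above, assuming a 64-bit VM
// (HeapWordSize == 8, sizeof(jint) == 4): max_jint / HeapWordSize is
// 268435455 and multiplying by sizeof(jint) gives 1073741820 payload words,
// versus 1073741823 if we multiplied first -- but the multiply-first form
// overflows the 32-bit juint intermediate. Losing those few words only makes
// the TLAB cap slightly smaller than the biggest int array, which the comment
// above already accepts; the header is then added and the result aligned down
// to MinObjAlignment.
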
// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus the protocol for maintaining the intended invariants turns
//     out, serendipitously, to be the same for both G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj) ||
      new_obj->is_typeArray()) {
    // Arrays of non-references don't need a pre-barrier.
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      bs->write_region(mr);
    }
  }
  return new_obj;
}

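// A hedged, conceptual sketch of the sequence this hook compensates for (the
// real code is emitted by C2/JVMCI, not written by hand): the fast path
// allocates and performs the initializing stores with no card marks, and a
// slow-path allocation instead ends up calling back in here so the card mark
// covering the whole object can either be deferred or issued immediately,
// per the _defer_initial_card_mark policy set up in pre_initialize():
//
//   oop obj = ...;                       // slow-path allocation
//   ...                                  // initializing stores, card marks elided
//   Universe::heap()->new_store_pre_barrier(thread, obj);
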
size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

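// Illustration of the loop in fill_with_objects() above: if carving off a
// full max-sized filler array would leave a tail smaller than min (too small
// to fill as an object), the loop carves off max - min words instead, so the
// tail eventually handed to fill_with_object_impl() is always at least min
// words and never larger than max.
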
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers should be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#if defined(COMPILER2) || INCLUDE_JVMCI
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

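// For reference (a hedged note, not load-bearing code): the heap dumps above
// are driven by the manageable flags -XX:+HeapDumpBeforeFullGC and
// -XX:+HeapDumpAfterFullGC, and the class histogram section is emitted when
// the (gc, classhisto) trace target is enabled, e.g. with something like
// -Xlog:gc+classhisto=trace on the command line.
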
void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}