/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"

class ClassLoaderData;

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceUtils::committed_bytes(),
      MetaspaceUtils::used_bytes(),
      MetaspaceUtils::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
      MetaspaceUtils::used_bytes(Metaspace::NonClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceUtils::committed_bytes(Metaspace::ClassType),
      MetaspaceUtils::used_bytes(Metaspace::ClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet::barrier_set()->print_on(st);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}
// WhiteBox API support for concurrent collectors. These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!check_obj_alignment(object)) {
    return false;
  }

  if (!is_in_reserved(object)) {
    return false;
  }

  if (is_in_reserved(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
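      // (Expanding the metaspace high-water mark does not itself require a
      // collection, so it is worth attempting even while GC is blocked.)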
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::obj_allocate_raw(Klass* klass, size_t size,
                                          bool* gc_overhead_limit_was_exceeded, TRAPS) {
  if (UseTLAB) {
    HeapWord* result = allocate_from_tlab(klass, size, THREAD);
    if (result != NULL) {
      return result;
    }
  }

  return allocate_outside_tlab(klass, size, gc_overhead_limit_was_exceeded, THREAD);
}

HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS) {
  HeapWord* obj = NULL;

  // In assertion mode, check that there was a sampling collector present
  // in the stack. This enforces checking that no path is without a sampling
  // collector.
  // Only check if the sampler could actually sample something in this call path.
  assert(!JvmtiExport::should_post_sampled_object_alloc()
         || !JvmtiSampledObjectAllocEventCollector::object_alloc_is_safe_to_sample()
         || THREAD->heap_sampler().sampling_collector_present(),
         "Sampling collector not present.");

  if (ThreadHeapSampler::enabled()) {
    // Try to allocate the sampled object from the TLAB; a sample point may
    // have been set and the TLAB may still have space.
    obj = THREAD->tlab().allocate_sampled_object(size);

    if (obj != NULL) {
      return obj;
    }
  }

  ThreadLocalAllocBuffer& tlab = THREAD->tlab();

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (tlab.free() > tlab.refill_waste_limit()) {
    tlab.record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = tlab.compute_size(size);

  tlab.clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB requesting new_tlab_size. Any size
  // between minimal and new_tlab_size is accepted.
  size_t actual_tlab_size = 0;
  size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(size);
  obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);
  if (obj == NULL) {
    assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
           min_tlab_size, new_tlab_size, actual_tlab_size);
    return NULL;
  }
  assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
         p2i(obj), min_tlab_size, new_tlab_size);

  AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, THREAD);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, actual_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
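    // (Mangling the rest of the buffer with badHeapWordVal makes later use of
    // uninitialized memory easier to spot in debug builds.)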
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }

  // Send the thread information about this allocation in case a sample is
  // requested.
  if (ThreadHeapSampler::enabled()) {
    size_t tlab_bytes_since_last_sample = THREAD->tlab().bytes_since_last_sample_point();
    THREAD->heap_sampler().check_for_sampling(obj, size, tlab_bytes_since_last_sample);
  }

  tlab.fill(obj, obj + size, actual_tlab_size);
  return obj;
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  // Set the length first for concurrent GC.
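  // (The array length determines the object's size, so it must be valid before
  // the klass is installed below; a concurrent GC thread that observes the
  // klass and sizes the object must never read an uninitialized length.)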
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    // If carving off a full max-sized chunk would leave a remainder smaller
    // than min, take slightly less (max - min) so the tail stays fillable.
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers should be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up"
         " otherwise concurrent mutator activity may make heap "
         " unparsable again");
  const bool use_tlab = UseTLAB;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
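  // JavaThreadIteratorWithHandle holds a ThreadsListHandle (Thread-SMR),
  // giving a stable snapshot of the JavaThreads list for the loop below.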
  JavaThreadIteratorWithHandle jtiwh;
  assert(!use_tlab || jtiwh.length() > 0,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  BarrierSet *bs = BarrierSet::barrier_set();
  for (; JavaThread *thread = jtiwh.next(); ) {
    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
    bs->make_parsable(thread);
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

void CollectedHeap::post_initialize() {
  initialize_serviceability();
}

#ifndef PRODUCT

bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
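      // Once PromotionFailureALotInterval collections have elapsed since the
      // last reset, every PromotionFailureALotCount-th promotion attempt is
      // made to fail artificially.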
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif  // #ifndef PRODUCT

bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}

void CollectedHeap::deduplicate_string(oop str) {
  // Do nothing, unless overridden in subclass.
}