1 /* 2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/systemDictionary.hpp" 27 #include "gc/shared/allocTracer.hpp" 28 #include "gc/shared/barrierSet.hpp" 29 #include "gc/shared/collectedHeap.hpp" 30 #include "gc/shared/collectedHeap.inline.hpp" 31 #include "gc/shared/gcLocker.inline.hpp" 32 #include "gc/shared/gcHeapSummary.hpp" 33 #include "gc/shared/gcTrace.hpp" 34 #include "gc/shared/gcTraceTime.inline.hpp" 35 #include "gc/shared/gcVMOperations.hpp" 36 #include "gc/shared/gcWhen.hpp" 37 #include "gc/shared/memAllocator.hpp" 38 #include "logging/log.hpp" 39 #include "memory/metaspace.hpp" 40 #include "memory/resourceArea.hpp" 41 #include "memory/universe.hpp" 42 #include "oops/instanceMirrorKlass.hpp" 43 #include "oops/oop.inline.hpp" 44 #include "runtime/handles.inline.hpp" 45 #include "runtime/init.hpp" 46 #include "runtime/mutexLocker.inline.hpp" 47 #include "runtime/thread.inline.hpp" 48 #include "runtime/threadSMR.hpp" 49 #include "runtime/vmThread.hpp" 50 #include "services/heapDumper.hpp" 51 #include "utilities/align.hpp" 52 #include "utilities/copy.hpp" 53 #include "utilities/events.inline.hpp" 54 55 class ClassLoaderData; 56 57 size_t CollectedHeap::_filler_array_max_size = 0; 58 59 class GCHeapLog : public EventLogBase<GCMessage> { 60 private: 61 void log_heap(CollectedHeap* heap, bool before); 62 63 public: 64 GCHeapLog() : EventLogBase<GCMessage>("GC Heap History", "gc") {} 65 66 void log_heap_before(CollectedHeap* heap) { 67 log_heap(heap, true); 68 } 69 void log_heap_after(CollectedHeap* heap) { 70 log_heap(heap, false); 71 } 72 }; 73 74 template <> 75 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) { 76 st->print_cr("GC heap %s", m.is_before ? "before" : "after"); 77 st->print_raw(m); 78 } 79 80 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) { 81 if (!should_log()) { 82 return; 83 } 84 85 double timestamp = fetch_timestamp(); 86 MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag); 87 int index = compute_log_index(); 88 _records[index].thread = NULL; // Its the GC thread so it's not that interesting. 89 _records[index].timestamp = timestamp; 90 _records[index].data.is_before = before; 91 stringStream st(_records[index].data.buffer(), _records[index].data.size()); 92 93 st.print_cr("{Heap %s GC invocations=%u (full %u):", 94 before ? "before" : "after", 95 heap->total_collections(), 96 heap->total_full_collections()); 97 98 heap->print_on(&st); 99 st.print_cr("}"); 100 } 101 102 size_t CollectedHeap::unused() const { 103 MutexLocker ml(Heap_lock); 104 return capacity() - used(); 105 } 106 107 VirtualSpaceSummary CollectedHeap::create_heap_space_summary() { 108 size_t capacity_in_words = capacity() / HeapWordSize; 109 110 return VirtualSpaceSummary( 111 _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end()); 112 } 113 114 GCHeapSummary CollectedHeap::create_heap_summary() { 115 VirtualSpaceSummary heap_space = create_heap_space_summary(); 116 return GCHeapSummary(heap_space, used()); 117 } 118 119 MetaspaceSummary CollectedHeap::create_metaspace_summary() { 120 const MetaspaceSizes meta_space( 121 MetaspaceUtils::committed_bytes(), 122 MetaspaceUtils::used_bytes(), 123 MetaspaceUtils::reserved_bytes()); 124 const MetaspaceSizes data_space( 125 MetaspaceUtils::committed_bytes(Metaspace::NonClassType), 126 MetaspaceUtils::used_bytes(Metaspace::NonClassType), 127 MetaspaceUtils::reserved_bytes(Metaspace::NonClassType)); 128 const MetaspaceSizes class_space( 129 MetaspaceUtils::committed_bytes(Metaspace::ClassType), 130 MetaspaceUtils::used_bytes(Metaspace::ClassType), 131 MetaspaceUtils::reserved_bytes(Metaspace::ClassType)); 132 133 const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary = 134 MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType); 135 const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary = 136 MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType); 137 138 return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space, 139 ms_chunk_free_list_summary, class_chunk_free_list_summary); 140 } 141 142 void CollectedHeap::print_heap_before_gc() { 143 Universe::print_heap_before_gc(); 144 if (_gc_heap_log != NULL) { 145 _gc_heap_log->log_heap_before(this); 146 } 147 } 148 149 void CollectedHeap::print_heap_after_gc() { 150 Universe::print_heap_after_gc(); 151 if (_gc_heap_log != NULL) { 152 _gc_heap_log->log_heap_after(this); 153 } 154 } 155 156 void CollectedHeap::print() const { print_on(tty); } 157 158 void CollectedHeap::print_on_error(outputStream* st) const { 159 st->print_cr("Heap:"); 160 print_extended_on(st); 161 st->cr(); 162 163 BarrierSet::barrier_set()->print_on(st); 164 } 165 166 void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) { 167 const GCHeapSummary& heap_summary = create_heap_summary(); 168 gc_tracer->report_gc_heap_summary(when, heap_summary); 169 170 const MetaspaceSummary& metaspace_summary = create_metaspace_summary(); 171 gc_tracer->report_metaspace_summary(when, metaspace_summary); 172 } 173 174 void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) { 175 trace_heap(GCWhen::BeforeGC, gc_tracer); 176 } 177 178 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) { 179 trace_heap(GCWhen::AfterGC, gc_tracer); 180 } 181 182 // WhiteBox API support for concurrent collectors. These are the 183 // default implementations, for collectors which don't support this 184 // feature. 185 bool CollectedHeap::supports_concurrent_phase_control() const { 186 return false; 187 } 188 189 bool CollectedHeap::request_concurrent_phase(const char* phase) { 190 return false; 191 } 192 193 bool CollectedHeap::is_oop(oop object) const { 194 if (!is_object_aligned(object)) { 195 return false; 196 } 197 198 if (!is_in(object)) { 199 return false; 200 } 201 202 if (is_in(object->klass_or_null())) { 203 return false; 204 } 205 206 return true; 207 } 208 209 // Memory state functions. 210 211 212 CollectedHeap::CollectedHeap() : 213 _is_gc_active(false), 214 _total_collections(0), 215 _total_full_collections(0), 216 _gc_cause(GCCause::_no_gc), 217 _gc_lastcause(GCCause::_no_gc) 218 { 219 const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT)); 220 const size_t elements_per_word = HeapWordSize / sizeof(jint); 221 _filler_array_max_size = align_object_size(filler_array_hdr_size() + 222 max_len / elements_per_word); 223 224 NOT_PRODUCT(_promotion_failure_alot_count = 0;) 225 NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;) 226 227 if (UsePerfData) { 228 EXCEPTION_MARK; 229 230 // create the gc cause jvmstat counters 231 _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause", 232 80, GCCause::to_string(_gc_cause), CHECK); 233 234 _perf_gc_lastcause = 235 PerfDataManager::create_string_variable(SUN_GC, "lastCause", 236 80, GCCause::to_string(_gc_lastcause), CHECK); 237 } 238 239 // Create the ring log 240 if (LogEvents) { 241 _gc_heap_log = new GCHeapLog(); 242 } else { 243 _gc_heap_log = NULL; 244 } 245 } 246 247 // This interface assumes that it's being called by the 248 // vm thread. It collects the heap assuming that the 249 // heap lock is already held and that we are executing in 250 // the context of the vm thread. 251 void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { 252 assert(Thread::current()->is_VM_thread(), "Precondition#1"); 253 assert(Heap_lock->is_locked(), "Precondition#2"); 254 GCCauseSetter gcs(this, cause); 255 switch (cause) { 256 case GCCause::_heap_inspection: 257 case GCCause::_heap_dump: 258 case GCCause::_metadata_GC_threshold : { 259 HandleMark hm; 260 do_full_collection(false); // don't clear all soft refs 261 break; 262 } 263 case GCCause::_metadata_GC_clear_soft_refs: { 264 HandleMark hm; 265 do_full_collection(true); // do clear all soft refs 266 break; 267 } 268 default: 269 ShouldNotReachHere(); // Unexpected use of this function 270 } 271 } 272 273 MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, 274 size_t word_size, 275 Metaspace::MetadataType mdtype) { 276 uint loop_count = 0; 277 uint gc_count = 0; 278 uint full_gc_count = 0; 279 280 assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock"); 281 282 do { 283 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 284 if (result != NULL) { 285 return result; 286 } 287 288 if (GCLocker::is_active_and_needs_gc()) { 289 // If the GCLocker is active, just expand and allocate. 290 // If that does not succeed, wait if this thread is not 291 // in a critical section itself. 292 result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype); 293 if (result != NULL) { 294 return result; 295 } 296 JavaThread* jthr = JavaThread::current(); 297 if (!jthr->in_critical()) { 298 // Wait for JNI critical section to be exited 299 GCLocker::stall_until_clear(); 300 // The GC invoked by the last thread leaving the critical 301 // section will be a young collection and a full collection 302 // is (currently) needed for unloading classes so continue 303 // to the next iteration to get a full GC. 304 continue; 305 } else { 306 if (CheckJNICalls) { 307 fatal("Possible deadlock due to allocating while" 308 " in jni critical section"); 309 } 310 return NULL; 311 } 312 } 313 314 { // Need lock to get self consistent gc_count's 315 MutexLocker ml(Heap_lock); 316 gc_count = Universe::heap()->total_collections(); 317 full_gc_count = Universe::heap()->total_full_collections(); 318 } 319 320 // Generate a VM operation 321 VM_CollectForMetadataAllocation op(loader_data, 322 word_size, 323 mdtype, 324 gc_count, 325 full_gc_count, 326 GCCause::_metadata_GC_threshold); 327 VMThread::execute(&op); 328 329 // If GC was locked out, try again. Check before checking success because the 330 // prologue could have succeeded and the GC still have been locked out. 331 if (op.gc_locked()) { 332 continue; 333 } 334 335 if (op.prologue_succeeded()) { 336 return op.result(); 337 } 338 loop_count++; 339 if ((QueuedAllocationWarningCount > 0) && 340 (loop_count % QueuedAllocationWarningCount == 0)) { 341 log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times," 342 " size=" SIZE_FORMAT, loop_count, word_size); 343 } 344 } while (true); // Until a GC is done 345 } 346 347 MemoryUsage CollectedHeap::memory_usage() { 348 return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity()); 349 } 350 351 352 #ifndef PRODUCT 353 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) { 354 if (CheckMemoryInitialization && ZapUnusedHeapArea) { 355 for (size_t slot = 0; slot < size; slot += 1) { 356 assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal), 357 "Found non badHeapWordValue in pre-allocation check"); 358 } 359 } 360 } 361 #endif // PRODUCT 362 363 size_t CollectedHeap::max_tlab_size() const { 364 // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE]. 365 // This restriction could be removed by enabling filling with multiple arrays. 366 // If we compute that the reasonable way as 367 // header_size + ((sizeof(jint) * max_jint) / HeapWordSize) 368 // we'll overflow on the multiply, so we do the divide first. 369 // We actually lose a little by dividing first, 370 // but that just makes the TLAB somewhat smaller than the biggest array, 371 // which is fine, since we'll be able to fill that. 372 size_t max_int_size = typeArrayOopDesc::header_size(T_INT) + 373 sizeof(jint) * 374 ((juint) max_jint / (size_t) HeapWordSize); 375 return align_down(max_int_size, MinObjAlignment); 376 } 377 378 size_t CollectedHeap::filler_array_hdr_size() { 379 return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long 380 } 381 382 size_t CollectedHeap::filler_array_min_size() { 383 return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment 384 } 385 386 #ifdef ASSERT 387 void CollectedHeap::fill_args_check(HeapWord* start, size_t words) 388 { 389 assert(words >= min_fill_size(), "too small to fill"); 390 assert(is_object_aligned(words), "unaligned size"); 391 } 392 393 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap) 394 { 395 if (ZapFillerObjects && zap) { 396 Copy::fill_to_words(start + filler_array_hdr_size(), 397 words - filler_array_hdr_size(), 0XDEAFBABE); 398 } 399 } 400 #endif // ASSERT 401 402 void 403 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap) 404 { 405 assert(words >= filler_array_min_size(), "too small for an array"); 406 assert(words <= filler_array_max_size(), "too big for a single object"); 407 408 const size_t payload_size = words - filler_array_hdr_size(); 409 const size_t len = payload_size * HeapWordSize / sizeof(jint); 410 assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len); 411 412 ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false); 413 allocator.initialize(start); 414 DEBUG_ONLY(zap_filler_array(start, words, zap);) 415 } 416 417 void 418 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap) 419 { 420 assert(words <= filler_array_max_size(), "too big for a single object"); 421 422 if (words >= filler_array_min_size()) { 423 fill_with_array(start, words, zap); 424 } else if (words > 0) { 425 assert(words == min_fill_size(), "unaligned size"); 426 ObjAllocator allocator(SystemDictionary::Object_klass(), words); 427 allocator.initialize(start); 428 } 429 } 430 431 void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap) 432 { 433 DEBUG_ONLY(fill_args_check(start, words);) 434 HandleMark hm; // Free handles before leaving. 435 fill_with_object_impl(start, words, zap); 436 } 437 438 void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap) 439 { 440 DEBUG_ONLY(fill_args_check(start, words);) 441 HandleMark hm; // Free handles before leaving. 442 443 // Multiple objects may be required depending on the filler array maximum size. Fill 444 // the range up to that with objects that are filler_array_max_size sized. The 445 // remainder is filled with a single object. 446 const size_t min = min_fill_size(); 447 const size_t max = filler_array_max_size(); 448 while (words > max) { 449 const size_t cur = (words - max) >= min ? max : max - min; 450 fill_with_array(start, cur, zap); 451 start += cur; 452 words -= cur; 453 } 454 455 fill_with_object_impl(start, words, zap); 456 } 457 458 void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) { 459 CollectedHeap::fill_with_object(start, end, zap); 460 } 461 462 size_t CollectedHeap::min_dummy_object_size() const { 463 return oopDesc::header_size(); 464 } 465 466 size_t CollectedHeap::tlab_alloc_reserve() const { 467 size_t min_size = min_dummy_object_size(); 468 return min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0; 469 } 470 471 HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size, 472 size_t requested_size, 473 size_t* actual_size) { 474 guarantee(false, "thread-local allocation buffers not supported"); 475 return NULL; 476 } 477 478 void CollectedHeap::ensure_parsability(bool retire_tlabs) { 479 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(), 480 "Should only be called at a safepoint or at start-up"); 481 482 ThreadLocalAllocStats stats; 483 484 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) { 485 BarrierSet::barrier_set()->make_parsable(thread); 486 if (UseTLAB) { 487 if (retire_tlabs) { 488 thread->tlab().retire(&stats); 489 } else { 490 thread->tlab().make_parsable(); 491 } 492 } 493 } 494 495 stats.publish(); 496 } 497 498 void CollectedHeap::resize_all_tlabs() { 499 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(), 500 "Should only resize tlabs at safepoint"); 501 502 if (UseTLAB && ResizeTLAB) { 503 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) { 504 thread->tlab().resize(); 505 } 506 } 507 } 508 509 void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) { 510 assert(timer != NULL, "timer is null"); 511 if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) { 512 GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer); 513 HeapDumper::dump_heap(); 514 } 515 516 LogTarget(Trace, gc, classhisto) lt; 517 if (lt.is_enabled()) { 518 GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer); 519 ResourceMark rm; 520 LogStream ls(lt); 521 VM_GC_HeapInspection inspector(&ls, false /* ! full gc */); 522 inspector.doit(); 523 } 524 } 525 526 void CollectedHeap::pre_full_gc_dump(GCTimer* timer) { 527 full_gc_dump(timer, true); 528 } 529 530 void CollectedHeap::post_full_gc_dump(GCTimer* timer) { 531 full_gc_dump(timer, false); 532 } 533 534 void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) { 535 // It is important to do this in a way such that concurrent readers can't 536 // temporarily think something is in the heap. (Seen this happen in asserts.) 537 _reserved.set_word_size(0); 538 _reserved.set_start((HeapWord*)rs.base()); 539 _reserved.set_end((HeapWord*)rs.end()); 540 } 541 542 void CollectedHeap::post_initialize() { 543 initialize_serviceability(); 544 } 545 546 #ifndef PRODUCT 547 548 bool CollectedHeap::promotion_should_fail(volatile size_t* count) { 549 // Access to count is not atomic; the value does not have to be exact. 550 if (PromotionFailureALot) { 551 const size_t gc_num = total_collections(); 552 const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number; 553 if (elapsed_gcs >= PromotionFailureALotInterval) { 554 // Test for unsigned arithmetic wrap-around. 555 if (++*count >= PromotionFailureALotCount) { 556 *count = 0; 557 return true; 558 } 559 } 560 } 561 return false; 562 } 563 564 bool CollectedHeap::promotion_should_fail() { 565 return promotion_should_fail(&_promotion_failure_alot_count); 566 } 567 568 void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) { 569 if (PromotionFailureALot) { 570 _promotion_failure_alot_gc_number = total_collections(); 571 *count = 0; 572 } 573 } 574 575 void CollectedHeap::reset_promotion_should_fail() { 576 reset_promotion_should_fail(&_promotion_failure_alot_count); 577 } 578 579 #endif // #ifndef PRODUCT 580 581 bool CollectedHeap::supports_object_pinning() const { 582 return false; 583 } 584 585 oop CollectedHeap::pin_object(JavaThread* thread, oop obj) { 586 ShouldNotReachHere(); 587 return NULL; 588 } 589 590 void CollectedHeap::unpin_object(JavaThread* thread, oop obj) { 591 ShouldNotReachHere(); 592 } 593 594 void CollectedHeap::deduplicate_string(oop str) { 595 // Do nothing, unless overridden in subclass. 596 } 597 598 size_t CollectedHeap::obj_size(oop obj) const { 599 return obj->size(); 600 } 601 602 uint32_t CollectedHeap::hash_oop(oop obj) const { 603 const uintptr_t addr = cast_from_oop<uintptr_t>(obj); 604 return static_cast<uint32_t>(addr >> LogMinObjAlignment); 605 }