/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"

class ClassLoaderData;

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}
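// The heap space summary exposes the heap as a (low, committed high,
// reserved high) address triple. Note that the committed end is computed
// below as start + capacity(), which treats committed space as if it were
// contiguous from the start of the reservation; for heaps whose committed
// regions are not contiguous this is an approximation, used only for the
// summary events reported to the GC tracer.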
"before" : "after", 78 heap->total_collections(), 79 heap->total_full_collections()); 80 81 heap->print_on(&st); 82 st.print_cr("}"); 83 } 84 85 VirtualSpaceSummary CollectedHeap::create_heap_space_summary() { 86 size_t capacity_in_words = capacity() / HeapWordSize; 87 88 return VirtualSpaceSummary( 89 reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end()); 90 } 91 92 GCHeapSummary CollectedHeap::create_heap_summary() { 93 VirtualSpaceSummary heap_space = create_heap_space_summary(); 94 return GCHeapSummary(heap_space, used()); 95 } 96 97 MetaspaceSummary CollectedHeap::create_metaspace_summary() { 98 const MetaspaceSizes meta_space( 99 MetaspaceAux::committed_bytes(), 100 MetaspaceAux::used_bytes(), 101 MetaspaceAux::reserved_bytes()); 102 const MetaspaceSizes data_space( 103 MetaspaceAux::committed_bytes(Metaspace::NonClassType), 104 MetaspaceAux::used_bytes(Metaspace::NonClassType), 105 MetaspaceAux::reserved_bytes(Metaspace::NonClassType)); 106 const MetaspaceSizes class_space( 107 MetaspaceAux::committed_bytes(Metaspace::ClassType), 108 MetaspaceAux::used_bytes(Metaspace::ClassType), 109 MetaspaceAux::reserved_bytes(Metaspace::ClassType)); 110 111 const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary = 112 MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType); 113 const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary = 114 MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType); 115 116 return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space, 117 ms_chunk_free_list_summary, class_chunk_free_list_summary); 118 } 119 120 void CollectedHeap::print_heap_before_gc() { 121 Universe::print_heap_before_gc(); 122 if (_gc_heap_log != NULL) { 123 _gc_heap_log->log_heap_before(this); 124 } 125 } 126 127 void CollectedHeap::print_heap_after_gc() { 128 Universe::print_heap_after_gc(); 129 if (_gc_heap_log != NULL) { 130 _gc_heap_log->log_heap_after(this); 131 } 132 } 133 134 void CollectedHeap::print_on_error(outputStream* st) const { 135 st->print_cr("Heap:"); 136 print_extended_on(st); 137 st->cr(); 138 139 _barrier_set->print_on(st); 140 } 141 142 void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) { 143 const GCHeapSummary& heap_summary = create_heap_summary(); 144 gc_tracer->report_gc_heap_summary(when, heap_summary); 145 146 const MetaspaceSummary& metaspace_summary = create_metaspace_summary(); 147 gc_tracer->report_metaspace_summary(when, metaspace_summary); 148 } 149 150 void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) { 151 trace_heap(GCWhen::BeforeGC, gc_tracer); 152 } 153 154 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) { 155 trace_heap(GCWhen::AfterGC, gc_tracer); 156 } 157 158 // WhiteBox API support for concurrent collectors. These are the 159 // default implementations, for collectors which don't support this 160 // feature. 161 bool CollectedHeap::supports_concurrent_phase_control() const { 162 return false; 163 } 164 165 const char* const* CollectedHeap::concurrent_phases() const { 166 static const char* const result[] = { NULL }; 167 return result; 168 } 169 170 bool CollectedHeap::request_concurrent_phase(const char* phase) { 171 return false; 172 } 173 174 // Memory state functions. 
// Memory state functions.


CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}
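// Retry loop for metadata allocation: try to allocate, and on failure either
// expand the metaspace in place (when a GC cannot be induced because the
// GCLocker is held) or hand a VM_CollectForMetadataAllocation operation to
// the VM thread and try again. The gc counts captured under the Heap_lock
// let the VM operation detect that another thread already performed the
// collection we were about to request.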
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    { // Need the Heap_lock to get a self-consistent pair of gc counts
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  BarrierSet::set_bs(barrier_set);
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif
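// Slow path for TLAB allocation: called when the current TLAB cannot satisfy
// the request. The policy below either keeps the TLAB (and allocates this one
// object directly in the shared heap) or retires it and allocates a fresh
// TLAB. The deciding threshold, refill_waste_limit(), is the amount of free
// TLAB space (in words) we are willing to waste by retiring early; it starts
// out derived from TLABRefillWasteFraction and is bumped on each slow-path
// allocation (see ThreadLocalAllocBuffer).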
HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we computed that in the obvious way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'd overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0xDEAFBABE);
  }
}
#endif // ASSERT
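// Filler objects overwrite dead ranges of the heap with parsable dummy
// objects: an int array for anything at least filler_array_min_size() words,
// a plain java.lang.Object for the minimum fill size. A worked example of
// the length computation below, assuming a 64-bit build with compressed
// class pointers (so the int-array header is 16 bytes = 2 HeapWords):
// filling 100 words gives payload_size = 98 words and
// len = 98 * 8 / 4 = 196 ints, and the resulting array occupies exactly
// 2 + 196 * 4 / 8 = 100 words again.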
void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}
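// Fill an arbitrarily large range with multiple filler objects. The chunk
// size chosen in the loop below guarantees that the tail left after each
// chunk is never a nonzero size smaller than min_fill_size(): if carving off
// a full max-sized chunk would leave too small a remainder, we carve off
// max - min instead. For instance, if min_fill_size() is 2 words, a range of
// filler_array_max_size() + 1 words becomes one chunk of max - 2 words
// followed by a single fillable 3-word object.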
void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be careful to know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  JavaThreadIteratorWithHandle jtiwh;
  assert(!use_tlab || jtiwh.length() > 0,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  BarrierSet *bs = barrier_set();
  for (; JavaThread *thread = jtiwh.next(); ) {
    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
    bs->make_parsable(thread);
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

void CollectedHeap::post_initialize() {
  initialize_serviceability();
}