1 /* 2 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/systemDictionary.hpp" 27 #include "gc/shared/allocTracer.hpp" 28 #include "gc/shared/collectedHeap.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "gc/shared/gcHeapSummary.hpp" 31 #include "gc/shared/gcTrace.hpp" 32 #include "gc/shared/gcTraceTime.inline.hpp" 33 #include "gc/shared/gcWhen.hpp" 34 #include "gc/shared/vmGCOperations.hpp" 35 #include "logging/log.hpp" 36 #include "memory/metaspace.hpp" 37 #include "memory/resourceArea.hpp" 38 #include "oops/instanceMirrorKlass.hpp" 39 #include "oops/oop.inline.hpp" 40 #include "runtime/init.hpp" 41 #include "runtime/thread.inline.hpp" 42 #include "services/heapDumper.hpp" 43 44 45 #ifdef ASSERT 46 int CollectedHeap::_fire_out_of_memory_count = 0; 47 #endif 48 49 size_t CollectedHeap::_filler_array_max_size = 0; 50 51 template <> 52 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) { 53 st->print_cr("GC heap %s", m.is_before ? "before" : "after"); 54 st->print_raw(m); 55 } 56 57 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) { 58 if (!should_log()) { 59 return; 60 } 61 62 double timestamp = fetch_timestamp(); 63 MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag); 64 int index = compute_log_index(); 65 _records[index].thread = NULL; // Its the GC thread so it's not that interesting. 66 _records[index].timestamp = timestamp; 67 _records[index].data.is_before = before; 68 stringStream st(_records[index].data.buffer(), _records[index].data.size()); 69 70 st.print_cr("{Heap %s GC invocations=%u (full %u):", 71 before ? "before" : "after", 72 heap->total_collections(), 73 heap->total_full_collections()); 74 75 heap->print_on(&st); 76 st.print_cr("}"); 77 } 78 79 VirtualSpaceSummary CollectedHeap::create_heap_space_summary() { 80 size_t capacity_in_words = capacity() / HeapWordSize; 81 82 return VirtualSpaceSummary( 83 reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end()); 84 } 85 86 GCHeapSummary CollectedHeap::create_heap_summary() { 87 VirtualSpaceSummary heap_space = create_heap_space_summary(); 88 return GCHeapSummary(heap_space, used()); 89 } 90 91 MetaspaceSummary CollectedHeap::create_metaspace_summary() { 92 const MetaspaceSizes meta_space( 93 MetaspaceAux::committed_bytes(), 94 MetaspaceAux::used_bytes(), 95 MetaspaceAux::reserved_bytes()); 96 const MetaspaceSizes data_space( 97 MetaspaceAux::committed_bytes(Metaspace::NonClassType), 98 MetaspaceAux::used_bytes(Metaspace::NonClassType), 99 MetaspaceAux::reserved_bytes(Metaspace::NonClassType)); 100 const MetaspaceSizes class_space( 101 MetaspaceAux::committed_bytes(Metaspace::ClassType), 102 MetaspaceAux::used_bytes(Metaspace::ClassType), 103 MetaspaceAux::reserved_bytes(Metaspace::ClassType)); 104 105 const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary = 106 MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType); 107 const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary = 108 MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType); 109 110 return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space, 111 ms_chunk_free_list_summary, class_chunk_free_list_summary); 112 } 113 114 void CollectedHeap::print_heap_before_gc() { 115 Universe::print_heap_before_gc(); 116 if (_gc_heap_log != NULL) { 117 _gc_heap_log->log_heap_before(this); 118 } 119 } 120 121 void CollectedHeap::print_heap_after_gc() { 122 Universe::print_heap_after_gc(); 123 if (_gc_heap_log != NULL) { 124 _gc_heap_log->log_heap_after(this); 125 } 126 } 127 128 void CollectedHeap::print_on_error(outputStream* st) const { 129 st->print_cr("Heap:"); 130 print_extended_on(st); 131 st->cr(); 132 133 _barrier_set->print_on(st); 134 } 135 136 void CollectedHeap::register_nmethod(nmethod* nm) { 137 assert_locked_or_safepoint(CodeCache_lock); 138 if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) { 139 CodeCache::add_scavenge_root_nmethod(nm); 140 } 141 } 142 143 void CollectedHeap::unregister_nmethod(nmethod* nm) { 144 assert_locked_or_safepoint(CodeCache_lock); 145 } 146 147 void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) { 148 const GCHeapSummary& heap_summary = create_heap_summary(); 149 gc_tracer->report_gc_heap_summary(when, heap_summary); 150 151 const MetaspaceSummary& metaspace_summary = create_metaspace_summary(); 152 gc_tracer->report_metaspace_summary(when, metaspace_summary); 153 } 154 155 void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) { 156 trace_heap(GCWhen::BeforeGC, gc_tracer); 157 } 158 159 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) { 160 trace_heap(GCWhen::AfterGC, gc_tracer); 161 } 162 163 // WhiteBox API support for concurrent collectors. These are the 164 // default implementations, for collectors which don't support this 165 // feature. 166 bool CollectedHeap::supports_concurrent_phase_control() const { 167 return false; 168 } 169 170 const char* const* CollectedHeap::concurrent_phases() const { 171 static const char* const result[] = { NULL }; 172 return result; 173 } 174 175 bool CollectedHeap::request_concurrent_phase(const char* phase) { 176 return false; 177 } 178 179 // Memory state functions. 180 181 182 CollectedHeap::CollectedHeap() : 183 _barrier_set(NULL), 184 _is_gc_active(false), 185 _total_collections(0), 186 _total_full_collections(0), 187 _gc_cause(GCCause::_no_gc), 188 _gc_lastcause(GCCause::_no_gc) 189 { 190 const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT)); 191 const size_t elements_per_word = HeapWordSize / sizeof(jint); 192 _filler_array_max_size = align_object_size(filler_array_hdr_size() + 193 max_len / elements_per_word); 194 195 NOT_PRODUCT(_promotion_failure_alot_count = 0;) 196 NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;) 197 198 if (UsePerfData) { 199 EXCEPTION_MARK; 200 201 // create the gc cause jvmstat counters 202 _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause", 203 80, GCCause::to_string(_gc_cause), CHECK); 204 205 _perf_gc_lastcause = 206 PerfDataManager::create_string_variable(SUN_GC, "lastCause", 207 80, GCCause::to_string(_gc_lastcause), CHECK); 208 } 209 210 // Create the ring log 211 if (LogEvents) { 212 _gc_heap_log = new GCHeapLog(); 213 } else { 214 _gc_heap_log = NULL; 215 } 216 } 217 218 // This interface assumes that it's being called by the 219 // vm thread. It collects the heap assuming that the 220 // heap lock is already held and that we are executing in 221 // the context of the vm thread. 222 void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { 223 assert(Thread::current()->is_VM_thread(), "Precondition#1"); 224 assert(Heap_lock->is_locked(), "Precondition#2"); 225 GCCauseSetter gcs(this, cause); 226 switch (cause) { 227 case GCCause::_heap_inspection: 228 case GCCause::_heap_dump: 229 case GCCause::_metadata_GC_threshold : { 230 HandleMark hm; 231 do_full_collection(false); // don't clear all soft refs 232 break; 233 } 234 case GCCause::_metadata_GC_clear_soft_refs: { 235 HandleMark hm; 236 do_full_collection(true); // do clear all soft refs 237 break; 238 } 239 default: 240 ShouldNotReachHere(); // Unexpected use of this function 241 } 242 } 243 244 void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) { 245 _barrier_set = barrier_set; 246 oopDesc::set_bs(_barrier_set); 247 } 248 249 void CollectedHeap::pre_initialize() { 250 } 251 252 #ifndef PRODUCT 253 void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) { 254 if (CheckMemoryInitialization && ZapUnusedHeapArea) { 255 for (size_t slot = 0; slot < size; slot += 1) { 256 assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal), 257 "Found badHeapWordValue in post-allocation check"); 258 } 259 } 260 } 261 262 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) { 263 if (CheckMemoryInitialization && ZapUnusedHeapArea) { 264 for (size_t slot = 0; slot < size; slot += 1) { 265 assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal), 266 "Found non badHeapWordValue in pre-allocation check"); 267 } 268 } 269 } 270 #endif // PRODUCT 271 272 #ifdef ASSERT 273 void CollectedHeap::check_for_valid_allocation_state() { 274 Thread *thread = Thread::current(); 275 // How to choose between a pending exception and a potential 276 // OutOfMemoryError? Don't allow pending exceptions. 277 // This is a VM policy failure, so how do we exhaustively test it? 278 assert(!thread->has_pending_exception(), 279 "shouldn't be allocating with pending exception"); 280 if (StrictSafepointChecks) { 281 assert(thread->allow_allocation(), 282 "Allocation done by thread for which allocation is blocked " 283 "by No_Allocation_Verifier!"); 284 // Allocation of an oop can always invoke a safepoint, 285 // hence, the true argument 286 thread->check_for_valid_safepoint_state(true); 287 } 288 } 289 #endif 290 291 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) { 292 293 // Retain tlab and allocate object in shared space if 294 // the amount free in the tlab is too large to discard. 295 if (thread->tlab().free() > thread->tlab().refill_waste_limit()) { 296 thread->tlab().record_slow_allocation(size); 297 return NULL; 298 } 299 300 // Discard tlab and allocate a new one. 301 // To minimize fragmentation, the last TLAB may be smaller than the rest. 302 size_t new_tlab_size = thread->tlab().compute_size(size); 303 304 thread->tlab().clear_before_allocation(); 305 306 if (new_tlab_size == 0) { 307 return NULL; 308 } 309 310 // Allocate a new TLAB... 311 HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size); 312 if (obj == NULL) { 313 return NULL; 314 } 315 316 AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize); 317 318 if (ZeroTLAB) { 319 // ..and clear it. 320 Copy::zero_to_words(obj, new_tlab_size); 321 } else { 322 // ...and zap just allocated object. 323 #ifdef ASSERT 324 // Skip mangling the space corresponding to the object header to 325 // ensure that the returned space is not considered parsable by 326 // any concurrent GC thread. 327 size_t hdr_size = oopDesc::header_size(); 328 Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal); 329 #endif // ASSERT 330 } 331 thread->tlab().fill(obj, obj + size, new_tlab_size); 332 return obj; 333 } 334 335 size_t CollectedHeap::max_tlab_size() const { 336 // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE]. 337 // This restriction could be removed by enabling filling with multiple arrays. 338 // If we compute that the reasonable way as 339 // header_size + ((sizeof(jint) * max_jint) / HeapWordSize) 340 // we'll overflow on the multiply, so we do the divide first. 341 // We actually lose a little by dividing first, 342 // but that just makes the TLAB somewhat smaller than the biggest array, 343 // which is fine, since we'll be able to fill that. 344 size_t max_int_size = typeArrayOopDesc::header_size(T_INT) + 345 sizeof(jint) * 346 ((juint) max_jint / (size_t) HeapWordSize); 347 return align_size_down(max_int_size, MinObjAlignment); 348 } 349 350 size_t CollectedHeap::filler_array_hdr_size() { 351 return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long 352 } 353 354 size_t CollectedHeap::filler_array_min_size() { 355 return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment 356 } 357 358 #ifdef ASSERT 359 void CollectedHeap::fill_args_check(HeapWord* start, size_t words) 360 { 361 assert(words >= min_fill_size(), "too small to fill"); 362 assert(words % MinObjAlignment == 0, "unaligned size"); 363 assert(Universe::heap()->is_in_reserved(start), "not in heap"); 364 assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap"); 365 } 366 367 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap) 368 { 369 if (ZapFillerObjects && zap) { 370 Copy::fill_to_words(start + filler_array_hdr_size(), 371 words - filler_array_hdr_size(), 0XDEAFBABE); 372 } 373 } 374 #endif // ASSERT 375 376 void 377 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap) 378 { 379 assert(words >= filler_array_min_size(), "too small for an array"); 380 assert(words <= filler_array_max_size(), "too big for a single object"); 381 382 const size_t payload_size = words - filler_array_hdr_size(); 383 const size_t len = payload_size * HeapWordSize / sizeof(jint); 384 assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len); 385 386 // Set the length first for concurrent GC. 387 ((arrayOop)start)->set_length((int)len); 388 post_allocation_setup_common(Universe::intArrayKlassObj(), start); 389 DEBUG_ONLY(zap_filler_array(start, words, zap);) 390 } 391 392 void 393 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap) 394 { 395 assert(words <= filler_array_max_size(), "too big for a single object"); 396 397 if (words >= filler_array_min_size()) { 398 fill_with_array(start, words, zap); 399 } else if (words > 0) { 400 assert(words == min_fill_size(), "unaligned size"); 401 post_allocation_setup_common(SystemDictionary::Object_klass(), start); 402 } 403 } 404 405 void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap) 406 { 407 DEBUG_ONLY(fill_args_check(start, words);) 408 HandleMark hm; // Free handles before leaving. 409 fill_with_object_impl(start, words, zap); 410 } 411 412 void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap) 413 { 414 DEBUG_ONLY(fill_args_check(start, words);) 415 HandleMark hm; // Free handles before leaving. 416 417 // Multiple objects may be required depending on the filler array maximum size. Fill 418 // the range up to that with objects that are filler_array_max_size sized. The 419 // remainder is filled with a single object. 420 const size_t min = min_fill_size(); 421 const size_t max = filler_array_max_size(); 422 while (words > max) { 423 const size_t cur = (words - max) >= min ? max : max - min; 424 fill_with_array(start, cur, zap); 425 start += cur; 426 words -= cur; 427 } 428 429 fill_with_object_impl(start, words, zap); 430 } 431 432 HeapWord* CollectedHeap::allocate_new_tlab(size_t size) { 433 guarantee(false, "thread-local allocation buffers not supported"); 434 return NULL; 435 } 436 437 void CollectedHeap::ensure_parsability(bool retire_tlabs) { 438 // The second disjunct in the assertion below makes a concession 439 // for the start-up verification done while the VM is being 440 // created. Callers be careful that you know that mutators 441 // aren't going to interfere -- for instance, this is permissible 442 // if we are still single-threaded and have either not yet 443 // started allocating (nothing much to verify) or we have 444 // started allocating but are now a full-fledged JavaThread 445 // (and have thus made our TLAB's) available for filling. 446 assert(SafepointSynchronize::is_at_safepoint() || 447 !is_init_completed(), 448 "Should only be called at a safepoint or at start-up" 449 " otherwise concurrent mutator activity may make heap " 450 " unparsable again"); 451 const bool use_tlab = UseTLAB; 452 // The main thread starts allocating via a TLAB even before it 453 // has added itself to the threads list at vm boot-up. 454 assert(!use_tlab || Threads::first() != NULL, 455 "Attempt to fill tlabs before main thread has been added" 456 " to threads list is doomed to failure!"); 457 BarrierSet *bs = barrier_set(); 458 for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) { 459 if (use_tlab) thread->tlab().make_parsable(retire_tlabs); 460 bs->make_parsable(thread); 461 } 462 } 463 464 void CollectedHeap::accumulate_statistics_all_tlabs() { 465 if (UseTLAB) { 466 assert(SafepointSynchronize::is_at_safepoint() || 467 !is_init_completed(), 468 "should only accumulate statistics on tlabs at safepoint"); 469 470 ThreadLocalAllocBuffer::accumulate_statistics_before_gc(); 471 } 472 } 473 474 void CollectedHeap::resize_all_tlabs() { 475 if (UseTLAB) { 476 assert(SafepointSynchronize::is_at_safepoint() || 477 !is_init_completed(), 478 "should only resize tlabs at safepoint"); 479 480 ThreadLocalAllocBuffer::resize_all_tlabs(); 481 } 482 } 483 484 void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) { 485 assert(timer != NULL, "timer is null"); 486 if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) { 487 GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer); 488 HeapDumper::dump_heap(); 489 } 490 491 Log(gc, classhisto) log; 492 if (log.is_trace()) { 493 GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer); 494 ResourceMark rm; 495 VM_GC_HeapInspection inspector(log.trace_stream(), false /* ! full gc */); 496 inspector.doit(); 497 } 498 } 499 500 void CollectedHeap::pre_full_gc_dump(GCTimer* timer) { 501 full_gc_dump(timer, true); 502 } 503 504 void CollectedHeap::post_full_gc_dump(GCTimer* timer) { 505 full_gc_dump(timer, false); 506 } 507 508 void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) { 509 // It is important to do this in a way such that concurrent readers can't 510 // temporarily think something is in the heap. (Seen this happen in asserts.) 511 _reserved.set_word_size(0); 512 _reserved.set_start(start); 513 _reserved.set_end(end); 514 } 515 516 void CollectedHeap::verify_nmethod_roots(nmethod* nmethod) { 517 nmethod->verify_scavenge_root_oops(); 518 }