/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/memAllocator.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

class ClassLoaderData;

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

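// Capture a before/after heap snapshot into the event log ring buffer.
// This runs on the GC/VM thread, so the log mutex is taken without a
// safepoint check.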
void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
                 before ? "before" : "after",
                 heap->total_collections(),
                 heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

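// Unused capacity, taken under the Heap_lock so that capacity() and used()
// are read as a consistent pair.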
size_t CollectedHeap::unused() const {
  MutexLocker ml(Heap_lock);
  return capacity() - used();
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceUtils::committed_bytes(),
      MetaspaceUtils::used_bytes(),
      MetaspaceUtils::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
      MetaspaceUtils::used_bytes(Metaspace::NonClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceUtils::committed_bytes(Metaspace::ClassType),
      MetaspaceUtils::used_bytes(Metaspace::ClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print() const { print_on(tty); }

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != NULL) {
    bs->print_on(st);
  }
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

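// Best-effort sanity check that 'object' plausibly refers to a Java object:
// it must be properly aligned, lie within the heap, and its Klass pointer
// must not point back into the Java heap (Klass structures live outside it).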
bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  if (is_in(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
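  // Maximum size, in heap words, of a single filler array: the aligned
  // int-array header plus max_len jint elements converted to words.
  // Example (64-bit): HeapWordSize == 8 gives elements_per_word == 2, so
  // the payload contributes max_len / 2 words.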
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_archive_time_gc:
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

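// Retry loop for a failed metadata allocation: try the loader's metaspace,
// expand it if the GCLocker currently blocks collection, then trigger a
// VM_CollectForMetadataAllocation operation. Returns NULL only when stalling
// for the GCLocker from within a JNI critical section would risk deadlock.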
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need the lock to get self-consistent gc counts
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC could still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}


#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // Note the mismatch between size (in heap words, 32 or 64 bits each) and
    // ju_addr, which always points to a 32-bit word.
    for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
      assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(SystemDictionary::Object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
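  // If filling a max-sized chunk would leave a tail smaller than the minimum
  // fill size, fill max - min instead so the remaining tail stays large
  // enough to cover with a valid filler object.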
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

size_t CollectedHeap::min_dummy_object_size() const {
  return oopDesc::header_size();
}

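// Space reserved at the end of a TLAB so it can always be retired with a
// dummy (filler) object; zero when a minimal dummy object is no larger than
// the object alignment, since any leftover gap can then hold one.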
size_t CollectedHeap::tlab_alloc_reserve() const {
  size_t min_size = min_dummy_object_size();
  return min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

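// Make the heap parsable for iteration by retiring or filling every thread's
// TLAB and flushing per-thread barrier state; must run at a safepoint (or
// during startup).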
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

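// Called around full collections: optionally dumps the heap
// (HeapDumpBeforeFullGC / HeapDumpAfterFullGC) and prints a class histogram
// when gc+classhisto=trace logging is enabled.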
void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)rs.base());
  _reserved.set_end((HeapWord*)rs.end());
}

void CollectedHeap::post_initialize() {
  initialize_serviceability();
}

#ifndef PRODUCT

bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif  // #ifndef PRODUCT

bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}

void CollectedHeap::deduplicate_string(oop str) {
  // Do nothing, unless overridden in subclass.
}

size_t CollectedHeap::obj_size(oop obj) const {
  return obj->size();
}

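// Address-based hash: drop the low bits that are always zero due to object
// alignment, then truncate to 32 bits.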
uint32_t CollectedHeap::hash_oop(oop obj) const {
  const uintptr_t addr = cast_from_oop<uintptr_t>(obj);
  return static_cast<uint32_t>(addr >> LogMinObjAlignment);
}