/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/fill.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

class ClassLoaderData;

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

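// Summarizes the heap's reserved region for tracing; the committed portion is
// taken to span from the start of the reserved region up to the current capacity.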
VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

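// Gathers committed/used/reserved sizes for metaspace as a whole and for the
// non-class and class parts separately, plus chunk free list statistics.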
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceUtils::committed_bytes(),
      MetaspaceUtils::used_bytes(),
      MetaspaceUtils::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
      MetaspaceUtils::used_bytes(Metaspace::NonClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceUtils::committed_bytes(Metaspace::ClassType),
      MetaspaceUtils::used_bytes(Metaspace::ClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet::barrier_set()->print_on(st);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors.  These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

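// Conservative sanity check for an oop: the pointer must be properly aligned,
// must lie within the reserved heap, and its klass pointer must not point back
// into the heap (Klass* reside in metaspace, outside the Java heap).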
bool CollectedHeap::is_oop(oop object) const {
  if (!check_obj_alignment(object)) {
    return false;
  }

  if (!is_in_reserved(object)) {
    return false;
  }

  if (is_in_reserved(object->klass_or_null())) {
    return false;
  }

  return true;
}

CollectedHeap::CollectedHeap() :
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  Fill::initialize();

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// Collects the heap. This interface assumes that it is being called on the
// VM thread and that the Heap_lock is already held.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold: {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

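// Retry loop for a failed metadata allocation: allocate, and if that fails,
// schedule a VM_CollectForMetadataAllocation operation and try again. The loop
// ends when an allocation succeeds or a GC operation completes its prologue,
// in which case the operation's result (possibly NULL) is returned.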
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need the lock to get self-consistent gc counts
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

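// Heap memory usage as exposed through the memory management interface:
// initial size, currently used, committed (capacity) and maximum capacity.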
MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}


#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we computed the limit the obvious way, as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'd overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

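// Default implementation for collectors that do not support thread-local
// allocation buffers; it must never be reached.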
HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

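// Allocation entry points for regular objects, arrays and java.lang.Class
// instances; the actual work is delegated to the allocator helpers declared
// in memAllocator.hpp.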
oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
  ObjAllocator allocator(klass, size, THREAD);
  return allocator.allocate();
}

oop CollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  ObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
  return allocator.allocate();
}

oop CollectedHeap::class_allocate(Klass* klass, int size, TRAPS) {
  ClassAllocator allocator(klass, size, THREAD);
  return allocator.allocate();
}

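// Brings the heap into a parsable state: every thread's TLAB is either retired
// or made parsable, and the barrier set gets a chance to flush per-thread state.
// Only legal at a safepoint or during VM start-up.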
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread* thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

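// Optionally produces a heap dump and/or a class histogram around a full GC,
// controlled by HeapDumpBeforeFullGC/HeapDumpAfterFullGC and the
// gc+classhisto=trace log configuration.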
void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

void CollectedHeap::post_initialize() {
  initialize_serviceability();
}

#ifndef PRODUCT

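// Support for the PromotionFailureALot stress option: once
// PromotionFailureALotInterval collections have elapsed, every
// PromotionFailureALotCount-th promotion attempt is reported as a failure.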
bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif  // #ifndef PRODUCT

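// Object pinning is opt-in: collectors that support it override these methods,
// and the default pin/unpin implementations must never be called.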
bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}

void CollectedHeap::deduplicate_string(oop str) {
  // Do nothing, unless overridden in subclass.
}

size_t CollectedHeap::obj_size(oop obj) const {
  return obj->size();
}