/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"

class ClassLoaderData;

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
                 before ? "before" : "after",
                 heap->total_collections(),
                 heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

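// Snapshot committed/used/reserved sizes for metaspace as a whole, for the
// non-class part, and for the compressed class space, for use by GC tracing.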
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceUtils::committed_bytes(),
      MetaspaceUtils::used_bytes(),
      MetaspaceUtils::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
      MetaspaceUtils::used_bytes(Metaspace::NonClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceUtils::committed_bytes(Metaspace::ClassType),
      MetaspaceUtils::used_bytes(Metaspace::ClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet::barrier_set()->print_on(st);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors.  These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

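// Debug/verification sanity check: a valid oop must be properly aligned, must
// lie within the reserved heap, and its klass pointer must point outside the
// Java heap (Klass metadata lives in metaspace, never on the heap).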
bool CollectedHeap::is_oop(oop object) const {
  if (!check_obj_alignment(object)) {
    return false;
  }

  if (!is_in_reserved(object)) {
    return false;
  }

  if (is_in_reserved(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
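  // Compute the maximum filler array size in heap words: the header of an
  // int[] plus the largest allowed int[] payload, rounded up to object
  // alignment. Used when filling unused heap ranges (see fill_with_array).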
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

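// Retry a metaspace allocation that has already failed. The loop alternates
// between allocating (or expanding) the loader's metaspace and scheduling a
// full GC via VM_CollectForMetadataAllocation, and only gives up if this
// thread is itself inside a JNI critical section while the GCLocker is active.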
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

#ifndef PRODUCT
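// Debug-only checks for heap zapping (ZapUnusedHeapArea): memory handed out by
// an allocation must no longer contain badHeapWordVal, while memory about to
// be handed out must still consist entirely of badHeapWordVal.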
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

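// Slow path of TLAB allocation, called when the current TLAB cannot satisfy
// the request: either keep the TLAB and return NULL (so the caller allocates
// directly in the shared heap), or retire it, allocate a fresh TLAB, and
// return the start of the new object within it.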
HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB requesting new_tlab_size. Any size
  // between minimal and new_tlab_size is accepted.
  size_t actual_tlab_size = 0;
  size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(size);
  HeapWord* obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);
  if (obj == NULL) {
    assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
           min_tlab_size, new_tlab_size, actual_tlab_size);
    return NULL;
  }
  assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
         p2i(obj), min_tlab_size, new_tlab_size);

  AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, thread);

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, actual_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, actual_tlab_size);
  return obj;
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

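// Fill a range with a single int[] filler object: the array length is chosen
// so that the header plus payload covers exactly 'words' heap words.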
void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

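// Default implementation for heaps that do not support TLABs; collectors that
// do support them override this to hand out a new buffer.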
HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers should be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  JavaThreadIteratorWithHandle jtiwh;
  assert(!use_tlab || jtiwh.length() > 0,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  BarrierSet *bs = BarrierSet::barrier_set();
  for (; JavaThread *thread = jtiwh.next(); ) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
     bs->make_parsable(thread);
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

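// Optionally dump the heap and/or log a class histogram around a full GC,
// gated by HeapDumpBeforeFullGC/HeapDumpAfterFullGC and the gc+classhisto
// trace log level.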
void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

void CollectedHeap::post_initialize() {
  initialize_serviceability();
}

#ifndef PRODUCT

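// Support for PromotionFailureALot: once PromotionFailureALotInterval GCs have
// elapsed, deliberately report a promotion failure every
// PromotionFailureALotCount calls, to exercise promotion-failure handling.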
bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif  // #ifndef PRODUCT

bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}