/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP
#define SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP

#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcWhen.hpp"
#include "memory/allocation.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
#include "services/memoryUsage.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"

// A "CollectedHeap" is an implementation of a java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class AdaptiveSizePolicy;
class BarrierSet;
class CollectorPolicy;
class GCHeapSummary;
class GCTimer;
class GCTracer;
class GCMemoryManager;
class MemoryPool;
class MetaspaceSummary;
class SoftRefPolicy;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
class WorkGang;
class nmethod;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;

  GCMessage() {}
};

class CollectedHeap;

class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(CollectedHeap* heap, bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}

  void log_heap_before(CollectedHeap* heap) {
    log_heap(heap, true);
  }
  void log_heap_after(CollectedHeap* heap) {
    log_heap(heap, false);
  }
};
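
// Illustrative sketch (not part of this file's interface): the heap
// brackets a collection with the two logging helpers above, e.g.:
//
//   GCHeapLog* log = _gc_heap_log;  // assumed already allocated
//   log->log_heap_before(heap);
//   // ... perform the collection ...
//   log->log_heap_after(heap);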

//
// CollectedHeap
//   GenCollectedHeap
//     SerialHeap
//     CMSHeap
//   G1CollectedHeap
//   ShenandoahHeap
//   ParallelScavengeHeap
//   ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  friend class MemAllocator;

 private:
#ifdef ASSERT
  static int       _fire_out_of_memory_count;
#endif

  GCHeapLog* _gc_heap_log;

  MemRegion _reserved;

 protected:
  bool _is_gc_active;

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  unsigned int _total_collections;          // ... started
  unsigned int _total_full_collections;     // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new TLAB. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size will be
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);
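
  // Illustrative sketch (not part of this interface): a subclass might
  // satisfy the contract above roughly as follows; try_allocate() is a
  // hypothetical helper standing in for the heap's own allocation path.
  //
  //   HeapWord* MyHeap::allocate_new_tlab(size_t min_size,
  //                                       size_t requested_size,
  //                                       size_t* actual_size) {
  //     // Prefer the ergonomically requested size, but accept anything
  //     // down to min_size before giving up and returning NULL.
  //     for (size_t size = requested_size; size >= min_size; size /= 2) {
  //       HeapWord* mem = try_allocate(size);
  //       if (mem != NULL) {
  //         *actual_size = size;  // report what was actually handed out
  //         return mem;
  //       }
  //     }
  //     return NULL;  // caller falls back to a shared allocation
  //   }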

  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs,
  // only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    None,
    Serial,
    Parallel,
    CMS,
    G1,
    Epsilon,
    Z,
    Shenandoah
  };

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual Name kind() const = 0;

  virtual const char* name() const = 0;

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place for such initialization methods.
  virtual void post_initialize();

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  // Stop and resume concurrent GC threads interfering with safepoint operations
  virtual void safepoint_synchronize_begin() {}
  virtual void safepoint_synchronize_end() {}

  void initialize_reserved_region(HeapWord *start, HeapWord *end);
  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // This method can be expensive so avoid using it in performance critical
  // code.
  virtual bool is_in(const void* p) const = 0;

  DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved".  But this may be too
  // liberal to perform useful checking.  Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head.  The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."

  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector.  Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }
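
  // Illustrative sketch (not part of this interface): the closed-subset
  // predicate is intended for assertions on decoded references, e.g.:
  //
  //   oop obj = ...;  // some reference loaded from the heap
  //   assert(Universe::heap()->is_in_closed_subset_or_null(obj),
  //          "reference points outside the heap's closed subset");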

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  virtual oop obj_allocate(Klass* klass, int size, TRAPS);
  virtual oop array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS);
  virtual oop class_allocate(Klass* klass, int size, TRAPS);

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
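
  // Illustrative sketch (not part of this interface): a typical use is
  // making the unused tail of a buffer parsable before the heap is walked:
  //
  //   HeapWord* top = ...;  // first unused word in the buffer
  //   HeapWord* end = ...;  // end of the buffer
  //   if (top < end) {
  //     CollectedHeap::fill_with_object(top, end);  // one filler object
  //   }
  //
  // For regions that may exceed filler_array_max_size(), use
  // fill_with_objects(), which may install several fillers.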

  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
  virtual size_t min_dummy_object_size() const;
  size_t tlab_alloc_reserve() const;

  // Return the address "addr" aligned by "alignment_in_bytes" if such
  // an address is below "end".  Return NULL otherwise.
  inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
                                                   HeapWord* end,
                                                   unsigned short alignment_in_bytes);
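
  // Illustrative sketch (not part of this interface): used when an
  // allocation must respect an alignment stronger than the default.
  // Assumes any gap in front of the aligned address is large enough to
  // hold a filler object:
  //
  //   HeapWord* aligned = align_allocation_or_fail(top, end, alignment);
  //   if (aligned == NULL) {
  //     // not enough room below 'end' to align; take the slow path
  //   } else if (aligned > top) {
  //     CollectedHeap::fill_with_object(top, aligned);  // keep heap parsable
  //   }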

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
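
  // Illustrative sketch (not part of this interface): with inline
  // contiguous allocation, generated code bumps the exported top field
  // with a compare-and-swap, conceptually (assuming the
  // Atomic::cmpxchg(new_value, dest, compare_value) argument order):
  //
  //   HeapWord* volatile* top_p = heap->top_addr();
  //   HeapWord* old_top = *top_p;
  //   HeapWord* new_top = old_top + size;
  //   if (new_top <= *heap->end_addr() &&
  //       Atomic::cmpxchg(new_top, top_p, old_top) == old_top) {
  //     return old_top;  // the words [old_top, new_top) now belong to us
  //   }
  //   // otherwise: retry, or fall back to mem_allocate()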

  // Some heaps may be in an unparsable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);
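
  // Illustrative sketch (not part of this interface): a safepoint
  // operation that walks the heap word-by-word makes it parsable first:
  //
  //   assert(SafepointSynchronize::is_at_safepoint(), "world must be stopped");
  //   heap->ensure_parsability(false /* retire_tlabs */);
  //   // every address is now inside a parsable object or a filler object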

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  virtual bool supports_tlab_allocation() const = 0;

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread *thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection.
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                       size_t size,
                                                       Metaspace::MetadataType mdtype);

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the CollectorPolicy for the heap
  virtual CollectorPolicy* collector_policy() const = 0;

  // Return the SoftRefPolicy for the heap.
  virtual SoftRefPolicy* soft_ref_policy() = 0;

  virtual MemoryUsage memory_usage();
  virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
  virtual GrowableArray<MemoryPool*> memory_pools() = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

  // NOTE! There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a block may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns its size.
  // "addr + size" is required to be the start of a new block, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
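
  // Illustrative sketch (not part of this interface): together these
  // three functions support a linear walk over a [bottom, top) range:
  //
  //   HeapWord* p = bottom;
  //   while (p < top) {
  //     size_t size = heap->block_size(p);
  //     if (heap->block_is_obj(p)) {
  //       cl->do_object(oop(p));  // cl is some ObjectClosure
  //     }
  //     p += size;  // blocks tile the range, so p always lands on a block start
  //   }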

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Generate any dumps preceding or following a full gc
 private:
  void full_gc_dump(GCTimer* timer, bool before);

  virtual void initialize_serviceability() = 0;

 public:
  void pre_full_gc_dump(GCTimer* timer);
  void post_full_gc_dump(GCTimer* timer);

  virtual VirtualSpaceSummary create_heap_space_summary();
  GCHeapSummary create_heap_summary();

  MetaspaceSummary create_metaspace_summary();

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const {
    print_on(tty);
  }
  // Print more detailed heap information on the given
  // outputStream. The default behavior is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  virtual void print_on_error(outputStream* st) const;

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  // The default behavior is to call print_gc_threads_on() on tty.
  void print_gc_threads() {
    print_gc_threads_on(tty);
  }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  void print_heap_before_gc();
  void print_heap_after_gc();

  // An object is scavengable if its location may move during a scavenge.
  // (A scavenge is a GC which is not a full GC.)
  virtual bool is_scavengable(oop obj) = 0;
  // Registering and unregistering an nmethod (compiled code) with the heap.
  // Override with specific mechanism for each specialized heap type.
  virtual void register_nmethod(nmethod* nm) {}
  virtual void unregister_nmethod(nmethod* nm) {}
  virtual void verify_nmethod(nmethod* nm) {}

  void trace_heap_before_gc(const GCTracer* gc_tracer);
  void trace_heap_after_gc(const GCTracer* gc_tracer);

  // Heap verification
  virtual void verify(VerifyOption option) = 0;

  // Return true if concurrent phase control (via
  // request_concurrent_phase) is supported by this collector.
  // The default implementation returns false.
  virtual bool supports_concurrent_phase_control() const;

  // Return a NULL terminated array of concurrent phase names provided
  // by this collector.  Supports WhiteBox testing.  These are the
  // names recognized by request_concurrent_phase(). The default
  // implementation returns an array of one NULL element.
  virtual const char* const* concurrent_phases() const;

  // Request the collector enter the indicated concurrent phase, and
  // wait until it does so.  Supports WhiteBox testing.  Only one
  // request may be active at a time.  Phases are designated by name;
  // the set of names and their meaning is GC-specific.  Once the
  // requested phase has been reached, the collector will attempt to
  // avoid transitioning to a new phase until a new request is made.
  // [Note: A collector might not be able to remain in a given phase.
  // For example, a full collection might cancel an in-progress
  // concurrent collection.]
  //
  // Returns true when the phase is reached.  Returns false for an
  // unknown phase.  The default implementation returns false.
  virtual bool request_concurrent_phase(const char* phase);
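
  // Illustrative sketch (not part of this interface): WhiteBox test code
  // drives phase control roughly as follows ("IDLE" is just an assumed
  // example; valid names come from concurrent_phases()):
  //
  //   CollectedHeap* heap = Universe::heap();
  //   if (heap->supports_concurrent_phase_control()) {
  //     bool reached = heap->request_concurrent_phase("IDLE");
  //     // reached == false means the phase name was not recognized
  //   }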

  // Provides a thread pool to SafepointSynchronize to use
  // for parallel safepoint cleanup.
  // GCs that use a GC worker thread pool may want to share
  // it for use during safepoint cleanup. This is only possible
  // if the GC can pause and resume concurrent work (e.g. G1
  // concurrent marking) for an intermittent non-GC safepoint.
  // If this method returns NULL, SafepointSynchronize will
  // perform cleanup tasks serially in the VMThread.
  virtual WorkGang* get_safepoint_workers() { return NULL; }

  // Support for object pinning. This is used by the JNI Get*Critical()
  // and Release*Critical() families of functions. If supported, the GC
  // must guarantee that pinned objects never move.
  virtual bool supports_object_pinning() const;
  virtual oop pin_object(JavaThread* thread, oop obj);
  virtual void unpin_object(JavaThread* thread, oop obj);
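
  // Illustrative sketch (not part of this interface): a critical-section
  // implementation might bracket native access with pin/unpin:
  //
  //   if (heap->supports_object_pinning()) {
  //     obj = heap->pin_object(thread, obj);  // obj will not move ...
  //     // ... expose obj's payload to native code ...
  //     heap->unpin_object(thread, obj);      // ... until unpinned here
  //   }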

  // Deduplicate the string, iff the GC supports string deduplication.
  virtual void deduplicate_string(oop str);

  virtual bool is_oop(oop object) const;

  virtual size_t obj_size(oop obj) const;

  // Cells are memory slices allocated by the allocator. Objects are
  // initialized in cells. The cell itself may have a header, found at a
  // negative offset from the oop. Usually, the size of the cell header
  // is 0, but it may be larger.
  virtual ptrdiff_t cell_header_size() const { return 0; }

  // Non product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  bool promotion_should_fail(volatile size_t* count);
  bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  void reset_promotion_should_fail(volatile size_t* count);
  void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    _heap->set_gc_cause(_previous_cause);
  }
};
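
// Illustrative sketch (not part of this interface): typical use inside a
// VM operation; the destructor restores the previous cause on scope exit:
//
//   {
//     GCCauseSetter gcs(heap, GCCause::_java_lang_system_gc);
//     heap->do_full_collection(false /* clear_all_soft_refs */);
//   } // previous cause restored here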

#endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP