/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP
#define SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP

#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcWhen.hpp"
#include "memory/allocation.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"

// A "CollectedHeap" is an implementation of a Java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class AdaptiveSizePolicy;
class BarrierSet;
class CollectorPolicy;
class GCHeapSummary;
class GCTimer;
class GCTracer;
class GCMemoryManager;
class MemoryPool;
class MetaspaceSummary;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
class WorkGang;
class nmethod;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;

  GCMessage() {}
};

class CollectedHeap;

class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(CollectedHeap* heap, bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}

  void log_heap_before(CollectedHeap* heap) {
    log_heap(heap, true);
  }
  void log_heap_after(CollectedHeap* heap) {
    log_heap(heap, false);
  }
};

//
// CollectedHeap
//   GenCollectedHeap
//     SerialHeap
//     CMSHeap
//   G1CollectedHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsGCActiveMark; // Block-structured external access to _is_gc_active

 private:
#ifdef ASSERT
  static int       _fire_out_of_memory_count;
#endif

  GCHeapLog* _gc_heap_log;

  MemRegion _reserved;

 protected:
  BarrierSet* _barrier_set;
  bool _is_gc_active;

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  unsigned int _total_collections;          // ... started
  unsigned int _total_full_collections;     // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new TLAB. All TLAB allocations must go through this.
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Accumulate statistics on all TLABs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();

  // Allocate from the current thread's TLAB, with broken-out slow path.
  inline static HeapWord* allocate_from_tlab(Klass* klass, Thread* thread, size_t size);
  static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size);

  // Allocates an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(Klass* klass, size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(Klass* klass, HeapWord* obj);
  inline static void post_allocation_setup_no_klass_install(Klass* klass,
                                                            HeapWord* objPtr);

  inline static void post_allocation_setup_obj(Klass* klass, HeapWord* obj, int size);

  inline static void post_allocation_setup_array(Klass* klass,
                                                 HeapWord* obj, int length);

  inline static void post_allocation_setup_class(Klass* klass, HeapWord* obj, int size);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    SerialHeap,
    ParallelScavengeHeap,
    G1CollectedHeap,
    CMSHeap
  };

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual Name kind() const = 0;

  virtual const char* name() const = 0;

  // Returns JNI error code JNI_ENOMEM if memory could not be allocated,
  // and JNI_OK on success.
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place for such initialization methods.
  virtual void post_initialize();

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  // Stop and resume concurrent GC threads interfering with safepoint operations
  virtual void safepoint_synchronize_begin() {}
  virtual void safepoint_synchronize_end() {}

  void initialize_reserved_region(HeapWord *start, HeapWord *end);
  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // This method can be expensive so avoid using it in performance critical
  // code.
  virtual bool is_in(const void* p) const = 0;

  DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved".  But this may be too
  // liberal to perform useful checking.  Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head.  The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."

  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector.  Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }
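
  // Example (illustrative, not part of this interface): the closed-subset
  // predicates are intended for assertion checking, e.g.
  //
  //   assert(Universe::heap()->is_in_closed_subset_or_null(p),
  //          "pointer escapes the heap's closed subset");
  //
  // where "p" is a hypothetical oop-typed value the caller expects to
  // stay inside the heap.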

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // General obj/array allocation facilities.
  inline static oop obj_allocate(Klass* klass, int size, TRAPS);
  inline static oop array_allocate(Klass* klass, int size, int length, TRAPS);
  inline static oop array_allocate_nozero(Klass* klass, int size, int length, TRAPS);
  inline static oop class_allocate(Klass* klass, int size, TRAPS);
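
  // Example (illustrative): runtime allocation paths go through these
  // covers rather than calling mem_allocate() directly; instance creation
  // conceptually boils down to something like
  //
  //   oop obj = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
  //
  // where "size" is the instance size in words and CHECK_NULL is the
  // usual TRAPS macro that unwinds on a pending exception.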

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs,
  // only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
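
  // Example (illustrative sketch): a collector retiring the unused tail of
  // a region might plug it with filler so heap walkers still see a parsable
  // sequence of blocks.  With hypothetical locals "top" and "end" bounding
  // the dead space:
  //
  //   if (pointer_delta(end, top) >= CollectedHeap::min_fill_size()) {
  //     CollectedHeap::fill_with_objects(top, pointer_delta(end, top));
  //   }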

  // Return the address "addr" aligned by "alignment_in_bytes" if such
  // an address is below "end".  Return NULL otherwise.
  inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
                                                   HeapWord* end,
                                                   unsigned short alignment_in_bytes);

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
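
  // Example (illustrative sketch): generated or runtime code can inline a
  // bump-the-pointer fast path against these exported fields; conceptually
  // (eliding memory-ordering details):
  //
  //   HeapWord* old_top = *top_addr();
  //   if (old_top + size <= *end_addr() &&
  //       Atomic::cmpxchg(old_top + size, top_addr(), old_top) == old_top) {
  //     return old_top;            // allocation succeeded without a lock
  //   }
  //   // otherwise fall back to mem_allocate()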

  // Some heaps may be in an unparseable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const = 0;

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread *thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
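
  // Example (illustrative): a TLAB refill path might bound the size of the
  // new buffer by this estimate before carving it out of the heap, along
  // the lines of
  //
  //   size_t limit = heap->unsafe_max_tlab_alloc(thread) / HeapWordSize;
  //   size_t new_tlab_words = MIN2(desired_words, limit);
  //
  // where "desired_words" is a hypothetical per-thread target size.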

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }
  void set_barrier_set(BarrierSet* barrier_set);

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the CollectorPolicy for the heap
  virtual CollectorPolicy* collector_policy() const = 0;

  virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
  virtual GrowableArray<MemoryPool*> memory_pools() = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

  // NOTE! There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
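
  // Example (illustrative sketch): together these allow a client to walk a
  // parsable region block by block, assuming the world is stopped, with
  // hypothetical locals "bottom" and "top" bounding the region and "cl" an
  // ObjectClosure*:
  //
  //   for (HeapWord* cur = bottom; cur < top; cur += block_size(cur)) {
  //     if (block_is_obj(cur)) {
  //       cl->do_object(oop(cur));   // visit a live object
  //     }
  //   }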

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Generate any dumps preceding or following a full gc
 private:
  void full_gc_dump(GCTimer* timer, bool before);

  virtual void initialize_serviceability() = 0;

 public:
  void pre_full_gc_dump(GCTimer* timer);
  void post_full_gc_dump(GCTimer* timer);

  virtual VirtualSpaceSummary create_heap_space_summary();
  GCHeapSummary create_heap_summary();

  MetaspaceSummary create_metaspace_summary();

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const {
    print_on(tty);
  }
  // Print more detailed heap information on the given
  // outputStream. The default behavior is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  virtual void print_on_error(outputStream* st) const;

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  // The default behavior is to call print_gc_threads_on() on tty.
  void print_gc_threads() {
    print_gc_threads_on(tty);
  }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  void print_heap_before_gc();
  void print_heap_after_gc();

  // An object is scavengable if its location may move during a scavenge.
  // (A scavenge is a GC which is not a full GC.)
  virtual bool is_scavengable(oop obj) = 0;
  // Registering and unregistering an nmethod (compiled code) with the heap.
  // Override with specific mechanism for each specialized heap type.
  virtual void register_nmethod(nmethod* nm) {}
  virtual void unregister_nmethod(nmethod* nm) {}
  virtual void verify_nmethod(nmethod* nm) {}

  void trace_heap_before_gc(const GCTracer* gc_tracer);
  void trace_heap_after_gc(const GCTracer* gc_tracer);

  // Heap verification
  virtual void verify(VerifyOption option) = 0;

  // Return true if concurrent phase control (via
  // request_concurrent_phase_control) is supported by this collector.
  // The default implementation returns false.
  virtual bool supports_concurrent_phase_control() const;

  // Return a NULL-terminated array of concurrent phase names provided
  // by this collector.  Supports WhiteBox testing.  These are the
  // names recognized by request_concurrent_phase(). The default
  // implementation returns an array of one NULL element.
  virtual const char* const* concurrent_phases() const;

  // Request the collector enter the indicated concurrent phase, and
  // wait until it does so.  Supports WhiteBox testing.  Only one
  // request may be active at a time.  Phases are designated by name;
  // the set of names and their meaning is GC-specific.  Once the
  // requested phase has been reached, the collector will attempt to
  // avoid transitioning to a new phase until a new request is made.
  // [Note: A collector might not be able to remain in a given phase.
  // For example, a full collection might cancel an in-progress
  // concurrent collection.]
  //
  // Returns true when the phase is reached.  Returns false for an
  // unknown phase.  The default implementation returns false.
  virtual bool request_concurrent_phase(const char* phase);

  // Provides a thread pool to SafepointSynchronize to use
  // for parallel safepoint cleanup.
  // GCs that use a GC worker thread pool may want to share
  // it for use during safepoint cleanup. This is only possible
  // if the GC can pause and resume concurrent work (e.g. G1
  // concurrent marking) for an intermittent non-GC safepoint.
  // If this method returns NULL, SafepointSynchronize will
  // perform cleanup tasks serially in the VMThread.
  virtual WorkGang* get_safepoint_workers() { return NULL; }

  // Non-product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  inline bool promotion_should_fail(volatile size_t* count);
  inline bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  inline void reset_promotion_should_fail(volatile size_t* count);
  inline void reset_promotion_should_fail();
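
  // Example (illustrative): a copying collector's promotion path might
  // consult the counter before copying into the old generation:
  //
  //   if (promotion_should_fail()) {
  //     return NULL;   // simulate a promotion failure for testing
  //   }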
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif

 public:
  // Copy the current allocation context statistics for the specified contexts.
  // For each context in contexts, set the corresponding entries in the totals
  // and accuracy arrays to the current values held by the statistics.  Each
  // array should be of length len.
  // Returns true if there are more stats available.
  virtual bool copy_allocation_context_stats(const jint* contexts,
                                             jlong* totals,
                                             jbyte* accuracy,
                                             jint len) {
    return false;
  }

};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap->set_gc_cause(_previous_cause);
  }
};
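
// Typical use (illustrative sketch): inside a VM operation executing at a
// safepoint, scope the cause around the collection, e.g.
//
//   {
//     GCCauseSetter gcs(heap, GCCause::_java_lang_system_gc);
//     heap->do_full_collection(false /* clear_all_soft_refs */);
//   }   // destructor restores the previous cause
//
// where "heap" is the active CollectedHeap.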

#endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP