/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP
#define SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP

#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcWhen.hpp"
#include "memory/allocation.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/events.hpp"

// A "CollectedHeap" is an implementation of a java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class AdaptiveSizePolicy;
class BarrierSet;
class CollectorPolicy;
class GCHeapSummary;
class GCTimer;
class GCTracer;
class MetaspaceSummary;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
class WorkGang;
class nmethod;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;

 public:
  GCMessage() {}
};

class CollectedHeap;

class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(CollectedHeap* heap, bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}

  void log_heap_before(CollectedHeap* heap) {
    log_heap(heap, true);
  }
  void log_heap_after(CollectedHeap* heap) {
    log_heap(heap, false);
  }
};
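
// Illustrative sketch (not part of the class above): CollectedHeap keeps a
// GCHeapLog and records heap state on both sides of a collection pause. The
// surrounding function is hypothetical; only the log_heap_before()/after()
// calls come from this interface.
//
//   void record_pause(CollectedHeap* heap, GCHeapLog* log) {
//     log->log_heap_before(heap);   // snapshot heap state before the pause
//     // ... perform the collection ...
//     log->log_heap_after(heap);    // snapshot heap state after the pause
//   }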

//
// CollectedHeap
//   GenCollectedHeap
//   G1CollectedHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active

 private:
#ifdef ASSERT
  static int       _fire_out_of_memory_count;
#endif

  GCHeapLog* _gc_heap_log;

  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
  // or INCLUDE_JVMCI is being used
  bool _defer_initial_card_mark;

  MemRegion _reserved;

 protected:
  BarrierSet* _barrier_set;
  bool _is_gc_active;

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  unsigned int _total_collections;          // ... started
  unsigned int _total_full_collections;     // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Do common initializations that must follow instance construction,
  // for example, those needing virtual calls.
  // This code could perhaps be moved into initialize() but would
  // be slightly more awkward because we want the latter to be a
  // pure virtual.
  void pre_initialize();

  // Create a new tlab. All TLAB allocations must go through this.
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Accumulate statistics on all tlabs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();

  // Allocate from the current thread's TLAB, with broken-out slow path.
  inline static HeapWord* allocate_from_tlab(Klass* klass, Thread* thread, size_t size);
  static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size);

  // Allocates an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed to be initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(Klass* klass, size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(Klass* klass, HeapWord* obj);
  inline static void post_allocation_setup_no_klass_install(Klass* klass,
                                                            HeapWord* objPtr);

  inline static void post_allocation_setup_obj(Klass* klass, HeapWord* obj, int size);

  inline static void post_allocation_setup_array(Klass* klass,
                                                 HeapWord* obj, int length);

  inline static void post_allocation_setup_class(Klass* klass, HeapWord* obj, int size);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap
  };

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual Name kind() const = 0;

  virtual const char* name() const = 0;

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to place such initialization methods.
  virtual void post_initialize() = 0;

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  void initialize_reserved_region(HeapWord *start, HeapWord *end);
  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // This method can be expensive so avoid using it in performance critical
  // code.
  virtual bool is_in(const void* p) const = 0;

  DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved".  But this may be too
  // liberal to permit useful checking.  Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head.  The
  // "is_in_closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in".

  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector.  Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }
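
  // Illustrative sketch (not part of this interface): closed-subset membership
  // is typically consulted from assertions about references encountered during
  // GC or verification. The closure class below is hypothetical; only the
  // is_in_closed_subset_or_null() call comes from this class.
  //
  //   void ExampleOopChecker::do_oop(oop* p) {
  //     oop obj = *p;
  //     assert(Universe::heap()->is_in_closed_subset_or_null(obj),
  //            "reference must stay within the heap's closed subset");
  //   }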

  // An object is scavengable if its location may move during a scavenge.
  // (A scavenge is a GC which is not a full GC.)
  virtual bool is_scavengable(const void *p) = 0;

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // General obj/array allocation facilities.
  inline static oop obj_allocate(Klass* klass, int size, TRAPS);
  inline static oop array_allocate(Klass* klass, int size, int length, TRAPS);
  inline static oop array_allocate_nozero(Klass* klass, int size, int length, TRAPS);
  inline static oop class_allocate(Klass* klass, int size, TRAPS);
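
  // Illustrative sketch (not part of this interface): a runtime slow path can
  // allocate a plain Java instance via obj_allocate(), which may safepoint or
  // throw OutOfMemoryError (hence TRAPS). The helper function is hypothetical;
  // only the obj_allocate() call comes from this class.
  //
  //   oop allocate_instance_slow(Klass* klass, TRAPS) {
  //     int size = InstanceKlass::cast(klass)->size_helper();  // words
  //     return CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
  //   }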

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs, only
  // individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
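
  // Illustrative sketch (not part of this interface): retiring the unused tail
  // of an allocation region by plugging it with filler objects so the heap
  // stays parsable. 'top' and 'end' are hypothetical bounds of the dead range.
  //
  //   void retire_region(HeapWord* top, HeapWord* end) {
  //     size_t words = pointer_delta(end, top);
  //     if (words >= CollectedHeap::min_fill_size()) {
  //       CollectedHeap::fill_with_objects(top, words);  // one or more fillers
  //     }
  //   }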

  // Return the address "addr" aligned by "alignment_in_bytes" if such
  // an address is below "end".  Return NULL otherwise.
  inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
                                                   HeapWord* end,
                                                   unsigned short alignment_in_bytes);

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
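
  // Illustrative sketch (not part of this interface): with inline contiguous
  // allocation, generated code bump-allocates by advancing *top_addr() with a
  // compare-and-swap and falls back to mem_allocate() on overflow or
  // contention. The function below is a hypothetical C++ rendering of that
  // fast path; the exact atomic primitive used is an assumption.
  //
  //   HeapWord* inline_alloc(CollectedHeap* heap, size_t size) {
  //     if (!heap->supports_inline_contig_alloc()) return NULL;
  //     HeapWord* volatile* top_p = heap->top_addr();
  //     HeapWord** end_p = heap->end_addr();
  //     while (true) {
  //       HeapWord* old_top = *top_p;
  //       HeapWord* new_top = old_top + size;
  //       if (new_top > *end_p) return NULL;   // overflow: take the slow path
  //       // Atomically install new_top (e.g. via Atomic::cmpxchg_ptr); retry
  //       // from the top on contention, otherwise return old_top.
  //     }
  //   }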

  // Some heaps may be in an unparsable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call the superclass
  // implementation, CollectedHeap::ensure_parsability, so that the
  // non-generational part of the work gets done; see, for instance, the
  // implementation in GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);
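
  // Illustrative sketch (not part of this interface): a subclass override that
  // keeps the superclass TLAB work, as the note above requires. The class name
  // ExampleHeap is hypothetical.
  //
  //   void ExampleHeap::ensure_parsability(bool retire_tlabs) {
  //     CollectedHeap::ensure_parsability(retire_tlabs);  // fill/retire TLABs
  //     // ... then make any heap-specific regions parsable ...
  //   }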

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const = 0;

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread *thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
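
  // Illustrative sketch (not part of this interface): using the queries above
  // to judge whether a thread's TLAB request can likely be satisfied without
  // triggering collection or expansion activity. The helper is hypothetical.
  //
  //   bool tlab_refill_may_fit(CollectedHeap* heap, Thread* thr, size_t words) {
  //     return heap->supports_tlab_allocation() &&
  //            words <= heap->max_tlab_size() &&
  //            words <= heap->unsafe_max_tlab_alloc(thr);
  //   }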

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // new_store_pre_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided.
  virtual bool can_elide_tlab_store_barriers() const = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere.  The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers. Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);

  // Answers whether an initializing store to a new object currently
  // allocated at the given address doesn't need a store
  // barrier. Returns "true" if it doesn't need an initializing
  // store barrier; answers "false" if it does.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // we will be informed of a slow-path allocation by a call
  // to new_store_pre_barrier() above. Such a call precedes the
  // initialization of the object itself, and no post-store-barriers will
  // be issued. Some heap types require that the barrier strictly follows
  // the initializing stores. (This is currently implemented by deferring the
  // barrier until the next slow-path allocation or gc-related safepoint.)
  // This interface answers whether a particular heap type needs the card
  // mark to be thus strictly sequenced after the stores.
  virtual bool card_mark_must_follow_store() const = 0;

  // If the CollectedHeap was asked to defer a store barrier above,
  // this informs it to flush such a deferred store barrier to the
  // remembered set.
  virtual void flush_deferred_store_barrier(JavaThread* thread);
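
  // Illustrative sketch (not part of this interface): the compiler runtime's
  // slow-path allocation follows the protocol above roughly as shown. The
  // function and its allocation step are hypothetical; only the
  // CollectedHeap calls come from this class.
  //
  //   oop slow_path_allocate(JavaThread* thread, CollectedHeap* heap,
  //                          Klass* klass, size_t size) {
  //     oop obj = /* allocate outside the TLAB */ NULL;
  //     if (heap->can_elide_tlab_store_barriers()) {
  //       // Register the object so any needed card mark is deferred until
  //       // after the initializing stores (see card_mark_must_follow_store).
  //       obj = heap->new_store_pre_barrier(thread, obj);
  //     }
  //     // ... initializing stores without individual post-store barriers ...
  //     return obj;
  //   }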

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }
  void set_barrier_set(BarrierSet* barrier_set);

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the AdaptiveSizePolicy for the heap.
  virtual AdaptiveSizePolicy* size_policy() = 0;

  // Return the CollectorPolicy for the heap
  virtual CollectorPolicy* collector_policy() const = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

  // NOTE! There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
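
  // Illustrative sketch (not part of this interface): walking the "blocks" of
  // a parsable range using the three functions above, for collectors that
  // implement them. 'bottom' and 'top' are hypothetical bounds of the range.
  //
  //   void walk_blocks(CollectedHeap* heap, HeapWord* bottom, HeapWord* top) {
  //     HeapWord* cur = bottom;
  //     while (cur < top) {
  //       if (heap->block_is_obj(cur)) {
  //         // cur is the start of a Java object
  //       }
  //       cur += heap->block_size(cur);   // advance to the next block
  //     }
  //   }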

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Generate any dumps preceding or following a full gc
 private:
  void full_gc_dump(GCTimer* timer, bool before);
 public:
  void pre_full_gc_dump(GCTimer* timer);
  void post_full_gc_dump(GCTimer* timer);

  VirtualSpaceSummary create_heap_space_summary();
  GCHeapSummary create_heap_summary();

  MetaspaceSummary create_metaspace_summary();

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const {
    print_on(tty);
  }
  // Print more detailed heap information on the given
  // outputStream. The default behavior is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  virtual void print_on_error(outputStream* st) const;

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  // The default behavior is to call print_gc_threads_on() on tty.
  void print_gc_threads() {
    print_gc_threads_on(tty);
  }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  void print_heap_before_gc();
  void print_heap_after_gc();

  // Registering and unregistering an nmethod (compiled code) with the heap.
  // Override with specific mechanism for each specialized heap type.
  virtual void register_nmethod(nmethod* nm);
  virtual void unregister_nmethod(nmethod* nm);

  void trace_heap_before_gc(const GCTracer* gc_tracer);
  void trace_heap_after_gc(const GCTracer* gc_tracer);

  // Heap verification
  virtual void verify(VerifyOption option) = 0;

  // Return true if concurrent phase control (via
  // request_concurrent_phase) is supported by this collector.
  // The default implementation returns false.
  virtual bool supports_concurrent_phase_control() const;

  // Return a NULL-terminated array of concurrent phase names provided
  // by this collector.  Supports WhiteBox testing.  These are the
  // names recognized by request_concurrent_phase(). The default
  // implementation returns an array of one NULL element.
  virtual const char* const* concurrent_phases() const;

  // Request that the collector enter the indicated concurrent phase, and
  // wait until it does so.  Supports WhiteBox testing.  Only one
  // request may be active at a time.  Phases are designated by name;
  // the set of names and their meaning is GC-specific.  Once the
  // requested phase has been reached, the collector will attempt to
  // avoid transitioning to a new phase until a new request is made.
  // [Note: A collector might not be able to remain in a given phase.
  // For example, a full collection might cancel an in-progress
  // concurrent collection.]
  //
  // Returns true when the phase is reached.  Returns false for an
  // unknown phase.  The default implementation returns false.
  virtual bool request_concurrent_phase(const char* phase);
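
  // Illustrative sketch (not part of this interface): a WhiteBox-style test
  // might pin the collector in a named phase while it inspects heap state.
  // The phase name "IDLE" is hypothetical; valid names come from
  // concurrent_phases().
  //
  //   if (heap->supports_concurrent_phase_control()) {
  //     bool reached = heap->request_concurrent_phase("IDLE");
  //     // ... examine state while the collector holds the requested phase ...
  //   }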

  // Provides a thread pool to SafepointSynchronize to use
  // for parallel safepoint cleanup.
  // GCs that use a GC worker thread pool may want to share
  // it for use during safepoint cleanup. This is only possible
  // if the GC can pause and resume concurrent work (e.g. G1
  // concurrent marking) for an intermittent non-GC safepoint.
  // If this method returns NULL, SafepointSynchronize will
  // perform cleanup tasks serially in the VMThread.
  virtual WorkGang* get_safepoint_workers() { return NULL; }

  // Non product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  inline bool promotion_should_fail(volatile size_t* count);
  inline bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  inline void reset_promotion_should_fail(volatile size_t* count);
  inline void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT
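
  // Illustrative sketch (not part of this interface): a collector's copy path
  // can consult promotion_should_fail() to inject artificial promotion
  // failures when PromotionFailureALot is enabled. The surrounding function
  // is hypothetical.
  //
  //   oop try_promote(CollectedHeap* heap, oop obj) {
  //     NOT_PRODUCT(
  //       if (heap->promotion_should_fail()) {
  //         return NULL;                        // simulate a failed promotion
  //       }
  //     )
  //     // ... otherwise copy obj into the old generation and return the copy ...
  //   }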

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif

 public:
  // Copy the current allocation context statistics for the specified contexts.
  // For each context in contexts, set the corresponding entries in the totals
  // and accuracy arrays to the current values held by the statistics.  Each
  // array should be of length len.
  // Returns true if there are more stats available.
  virtual bool copy_allocation_context_stats(const jint* contexts,
                                             jlong* totals,
                                             jbyte* accuracy,
                                             jint len) {
    return false;
  }

};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap->set_gc_cause(_previous_cause);
  }
};
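
// Illustrative sketch (not part of the class above): GCCauseSetter is meant to
// be used as a stack object inside a VM operation, so the previous cause is
// restored on every exit path. The VM operation shown is hypothetical.
//
//   void VM_ExampleCollect::doit() {
//     GCCauseSetter gcs(Universe::heap(), GCCause::_java_lang_system_gc);
//     Universe::heap()->do_full_collection(false /* clear_all_soft_refs */);
//   }   // destructor restores the previous cause here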

#endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP