/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP
#define SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP

#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcWhen.hpp"
#include "memory/allocation.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
#include "services/memoryUsage.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"

// A "CollectedHeap" is an implementation of a java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class AdaptiveSizePolicy;
class BarrierSet;
class CollectorPolicy;
class GCHeapSummary;
class GCTimer;
class GCTracer;
class GCMemoryManager;
class MemoryPool;
class MetaspaceSummary;
class SoftRefPolicy;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
class WorkGang;
class nmethod;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;

 public:
  GCMessage() {}
};

class CollectedHeap;

class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(CollectedHeap* heap, bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}

  void log_heap_before(CollectedHeap* heap) {
    log_heap(heap, true);
  }
  void log_heap_after(CollectedHeap* heap) {
    log_heap(heap, false);
  }
};

//
// CollectedHeap
//   GenCollectedHeap
//     SerialHeap
//     CMSHeap
//   G1CollectedHeap
//   ParallelScavengeHeap
//   ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  friend class MemAllocator;

 private:
#ifdef ASSERT
  static int       _fire_out_of_memory_count;
#endif

  GCHeapLog* _gc_heap_log;

  MemRegion _reserved;

 protected:
  bool _is_gc_active;

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  unsigned int _total_collections;          // ... started
  unsigned int _total_full_collections;     // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();
  // Create a new tlab. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size will be
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);
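
  // Illustrative sketch (hypothetical subclass and helper, not part of this
  // interface): an implementation typically tries the ergonomic size first
  // and falls back toward min_size, reporting what it actually obtained via
  // actual_size:
  //
  //   HeapWord* MyHeap::allocate_new_tlab(size_t min_size,
  //                                       size_t requested_size,
  //                                       size_t* actual_size) {
  //     size_t size = requested_size;         // start from the ergonomic size
  //     HeapWord* mem = try_allocate(size);   // hypothetical helper
  //     if (mem == NULL && min_size < requested_size) {
  //       size = min_size;                    // fall back to the minimum
  //       mem = try_allocate(size);
  //     }
  //     if (mem != NULL) {
  //       *actual_size = size;
  //     }
  //     return mem;   // NULL => caller may fall back to mem_allocate() or GC
  //   }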

  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be
  // called to allocate TLABs, only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    None,
    Serial,
    Parallel,
    CMS,
    G1,
    Epsilon,
    Z
  };

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual Name kind() const = 0;

  virtual const char* name() const = 0;

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place for such initialization methods.
  virtual void post_initialize();

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  // Stop and resume concurrent GC threads interfering with safepoint operations
  virtual void safepoint_synchronize_begin() {}
  virtual void safepoint_synchronize_end() {}

  void initialize_reserved_region(HeapWord *start, HeapWord *end);
  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // This method can be expensive so avoid using it in performance critical
  // code.
  virtual bool is_in(const void* p) const = 0;

  DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved".  But this may be too
  // liberal to perform useful checking.  Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head.  The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."

  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector.  Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  virtual oop obj_allocate(Klass* klass, int size, TRAPS);
  virtual oop array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS);
  virtual oop class_allocate(Klass* klass, int size, TRAPS);

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
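
  // Example (illustrative sketch; "gap_start"/"gap_end" are assumed local
  // values): a collector that leaves a gap in otherwise-parsable space can
  // plug it with filler objects so heap walkers can step over it:
  //
  //   size_t gap_words = pointer_delta(gap_end, gap_start);
  //   if (gap_words >= CollectedHeap::min_fill_size()) {
  //     CollectedHeap::fill_with_objects(gap_start, gap_words);
  //   }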

  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
  virtual size_t min_dummy_object_size() const;
  size_t tlab_alloc_reserve() const;

  // Return the address "addr" aligned by "alignment_in_bytes" if such
  // an address is below "end".  Return NULL otherwise.
  inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
                                                   HeapWord* end,
                                                   unsigned short alignment_in_bytes);
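
  // Illustrative sketch ("top", "end" and "alignment_in_bytes" are assumed
  // local values): bump an allocation pointer to an aligned boundary, filling
  // the skipped words so the region stays parsable, or give up if the aligned
  // address would pass "end".  Assumes the skipped range can hold a filler
  // object:
  //
  //   HeapWord* aligned = align_allocation_or_fail(top, end, alignment_in_bytes);
  //   if (aligned != NULL && aligned > top) {
  //     fill_with_object(top, aligned);   // keep the skipped range walkable
  //     top = aligned;
  //   }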

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
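
  // Illustrative sketch of how the exported top/end addresses enable
  // bump-pointer allocation (in practice this pattern is emitted as compiled
  // code, and the update of *top must be done atomically, e.g. with a CAS):
  //
  //   HeapWord* volatile* top = heap->top_addr();
  //   HeapWord**          end = heap->end_addr();
  //   HeapWord* obj = *top;
  //   if (obj + size <= *end) {
  //     *top = obj + size;   // claim "size" words; real code uses a CAS here
  //     // obj now refers to the newly allocated space
  //   }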

  // Some heaps may be in an unparseable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);
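
  // Illustrative sketch (hypothetical subclass): an override should delegate
  // to the superclass so the TLAB-related work described above still happens:
  //
  //   void MyHeap::ensure_parsability(bool retire_tlabs) {
  //     CollectedHeap::ensure_parsability(retire_tlabs);  // fill/retire TLABs
  //     // ... then make any collector-private regions walkable ...
  //   }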

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const = 0;

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread *thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                       size_t size,
                                                       Metaspace::MetadataType mdtype);

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the CollectorPolicy for the heap
  virtual CollectorPolicy* collector_policy() const = 0;
  // Return the SoftRefPolicy for the heap
  virtual SoftRefPolicy* soft_ref_policy() = 0;

  virtual MemoryUsage memory_usage();
  virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
  virtual GrowableArray<MemoryPool*> memory_pools() = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

  // NOTE! There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
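
  // Illustrative sketch: given the three block functions above, a range
  // [bottom, top) of the heap can be walked one block at a time:
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     size_t words = block_size(cur);
  //     if (block_is_obj(cur)) {
  //       // cur is the start of a Java object that is "words" words long
  //     }
  //     cur += words;   // advance to the start of the next block
  //   }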

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Generate any dumps preceding or following a full gc
 private:
  void full_gc_dump(GCTimer* timer, bool before);

  virtual void initialize_serviceability() = 0;

 public:
  void pre_full_gc_dump(GCTimer* timer);
  void post_full_gc_dump(GCTimer* timer);

  virtual VirtualSpaceSummary create_heap_space_summary();
  GCHeapSummary create_heap_summary();

  MetaspaceSummary create_metaspace_summary();

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const {
    print_on(tty);
  }
  // Print more detailed heap information on the given
  // outputStream. The default behavior is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  virtual void print_on_error(outputStream* st) const;

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  // The default behavior is to call print_gc_threads_on() on tty.
  void print_gc_threads() {
    print_gc_threads_on(tty);
  }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  void print_heap_before_gc();
  void print_heap_after_gc();

  // An object is scavengable if its location may move during a scavenge.
  // (A scavenge is a GC which is not a full GC.)
  virtual bool is_scavengable(oop obj) = 0;
  // Registering and unregistering an nmethod (compiled code) with the heap.
  // Override with specific mechanism for each specialized heap type.
  virtual void register_nmethod(nmethod* nm) {}
  virtual void unregister_nmethod(nmethod* nm) {}
  virtual void verify_nmethod(nmethod* nmethod) {}

  void trace_heap_before_gc(const GCTracer* gc_tracer);
  void trace_heap_after_gc(const GCTracer* gc_tracer);

  // Heap verification
  virtual void verify(VerifyOption option) = 0;

  // Return true if concurrent phase control (via
  // request_concurrent_phase_control) is supported by this collector.
  // The default implementation returns false.
  virtual bool supports_concurrent_phase_control() const;

  // Return a NULL terminated array of concurrent phase names provided
  // by this collector.  Supports Whitebox testing.  These are the
  // names recognized by request_concurrent_phase(). The default
  // implementation returns an array of one NULL element.
  virtual const char* const* concurrent_phases() const;

  // Request the collector enter the indicated concurrent phase, and
  // wait until it does so.  Supports WhiteBox testing.  Only one
  // request may be active at a time.  Phases are designated by name;
  // the set of names and their meaning is GC-specific.  Once the
  // requested phase has been reached, the collector will attempt to
  // avoid transitioning to a new phase until a new request is made.
  // [Note: A collector might not be able to remain in a given phase.
  // For example, a full collection might cancel an in-progress
  // concurrent collection.]
  //
  // Returns true when the phase is reached.  Returns false for an
  // unknown phase.  The default implementation returns false.
  virtual bool request_concurrent_phase(const char* phase);

  // Provides a thread pool to SafepointSynchronize to use
  // for parallel safepoint cleanup.
  // GCs that use a GC worker thread pool may want to share
  // it for use during safepoint cleanup. This is only possible
  // if the GC can pause and resume concurrent work (e.g. G1
  // concurrent marking) for an intermittent non-GC safepoint.
  // If this method returns NULL, SafepointSynchronize will
  // perform cleanup tasks serially in the VMThread.
  virtual WorkGang* get_safepoint_workers() { return NULL; }

  // Support for object pinning. This is used by JNI Get*Critical()
  // and Release*Critical() family of functions. If supported, the GC
  // must guarantee that pinned objects never move.
  virtual bool supports_object_pinning() const;
  virtual oop pin_object(JavaThread* thread, oop obj);
  virtual void unpin_object(JavaThread* thread, oop obj);

  // Deduplicate the string, iff the GC supports string deduplication.
  virtual void deduplicate_string(oop str);

  virtual bool is_oop(oop object) const;

  virtual size_t obj_size(oop obj) const;

  // Non product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  bool promotion_should_fail(volatile size_t* count);
  bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  void reset_promotion_should_fail(volatile size_t* count);
  void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    _heap->set_gc_cause(_previous_cause);
  }
};
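
// Example (illustrative): a VM operation can record why it is collecting for
// the duration of the operation; the previous cause is restored when the
// setter goes out of scope:
//
//   {
//     GCCauseSetter gcs(heap, GCCause::_java_lang_system_gc);
//     heap->do_full_collection(false /* clear_all_soft_refs */);
//   } // destructor restores the previous GC cause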

#endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP