/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP
#define SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP

#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcWhen.hpp"
#include "memory/allocation.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"

// A "CollectedHeap" is an implementation of a Java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class AdaptiveSizePolicy;
class BarrierSet;
class CollectorPolicy;
class GCHeapSummary;
class GCTimer;
class GCTracer;
class GCMemoryManager;
class MemoryPool;
class MetaspaceSummary;
class SoftRefPolicy;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
class WorkGang;
class nmethod;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;

  GCMessage() {}
};

class CollectedHeap;

class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(CollectedHeap* heap, bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}

  void log_heap_before(CollectedHeap* heap) {
    log_heap(heap, true);
  }
  void log_heap_after(CollectedHeap* heap) {
    log_heap(heap, false);
  }
};

//
// CollectedHeap
//   GenCollectedHeap
//     SerialHeap
//     CMSHeap
//   G1CollectedHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active

 private:
#ifdef ASSERT
  static int       _fire_out_of_memory_count;
#endif

  GCHeapLog* _gc_heap_log;

  MemRegion _reserved;

 protected:
  bool _is_gc_active;

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  unsigned int _total_collections;          // ... started
  unsigned int _total_full_collections;     // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new TLAB. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size will be
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);
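
  // An override is expected to honor the contract above roughly as in this
  // illustrative sketch (not part of the interface; eden_attempt_allocation()
  // is a hypothetical helper):
  //
  //   HeapWord* MyHeap::allocate_new_tlab(size_t min_size,
  //                                       size_t requested_size,
  //                                       size_t* actual_size) {
  //     size_t size = requested_size;   // start from the ergonomic size
  //     HeapWord* mem = eden_attempt_allocation(min_size, &size);
  //     if (mem != NULL) {
  //       *actual_size = size;          // somewhere in [min_size, requested_size]
  //     }
  //     return mem;                     // NULL means allocation failed
  //   }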

  // Accumulate statistics on all TLABs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();

  // Allocate from the current thread's TLAB, with broken-out slow path.
  inline static HeapWord* allocate_from_tlab(Klass* klass, size_t size, TRAPS);
  static HeapWord* allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS);

  // Allocates an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(Klass* klass, size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(Klass* klass, HeapWord* obj);
  inline static void post_allocation_setup_no_klass_install(Klass* klass,
                                                            HeapWord* objPtr);

  inline static void post_allocation_setup_obj(Klass* klass, HeapWord* obj, int size);

  inline static void post_allocation_setup_array(Klass* klass,
                                                 HeapWord* obj, int length);

  inline static void post_allocation_setup_class(Klass* klass, HeapWord* obj, int size);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    None,
    Serial,
    Parallel,
    CMS,
    G1
  };

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual Name kind() const = 0;

  virtual const char* name() const = 0;

  // Returns JNI error code JNI_ENOMEM if memory could not be allocated,
  // and JNI_OK on success.
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place for such initialization methods.
  virtual void post_initialize();

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  // Stop and resume concurrent GC threads interfering with safepoint operations
  virtual void safepoint_synchronize_begin() {}
  virtual void safepoint_synchronize_end() {}

  void initialize_reserved_region(HeapWord *start, HeapWord *end);
  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // This method can be expensive so avoid using it in performance critical
  // code.
  virtual bool is_in(const void* p) const = 0;

  DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved".  But this may be too
  // liberal to perform useful checking.  Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head.  The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."

  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector.  Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }
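
  // For example, a debug check on a loaded reference can use the
  // closed-subset predicate instead of the weaker is_in_reserved()
  // (illustrative):
  //
  //   assert(Universe::heap()->is_in_closed_subset_or_null(obj),
  //          "reference escapes the heap's closed subset");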

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // General obj/array allocation facilities.
  inline static oop obj_allocate(Klass* klass, int size, TRAPS);
  inline static oop array_allocate(Klass* klass, int size, int length, TRAPS);
  inline static oop array_allocate_nozero(Klass* klass, int size, int length, TRAPS);
  inline static oop class_allocate(Klass* klass, int size, TRAPS);
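
  // For instance, allocating a Java int array of 'length' elements from
  // runtime code looks, in outline, like this (illustrative sketch; the
  // size computation is elided and errors propagate via TRAPS):
  //
  //   Klass* k = Universe::intArrayKlassObj();
  //   int size = ...;  // object size in words, from klass layout and length
  //   oop arr = CollectedHeap::array_allocate(k, size, length, CHECK_NULL);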

  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs,
  // only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;

  // Raw memory allocation. This may or may not use TLAB allocations to satisfy the
  // allocation. A GC implementation may override this function to satisfy the allocation
  // in any way. But the default is to try a TLAB allocation, and otherwise perform
  // mem_allocate.
  virtual HeapWord* obj_allocate_raw(Klass* klass, size_t size,
                                     bool* gc_overhead_limit_was_exceeded, TRAPS);
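
  // A minimal sketch of that default strategy (the real code lives in the
  // .inline.hpp/.cpp implementation files):
  //
  //   HeapWord* mem = THREAD->tlab().allocate(size);              // fast path
  //   if (mem == NULL) {
  //     mem = mem_allocate(size, gc_overhead_limit_was_exceeded); // slow path
  //   }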

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
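
  // Typical use: retiring the unused tail of a buffer (illustrative;
  // 'top' and 'end' are assumed bounds of the free tail):
  //
  //   if (pointer_delta(end, top) >= CollectedHeap::min_fill_size()) {
  //     CollectedHeap::fill_with_object(top, end);  // zaps in debug builds
  //   }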

  // Return the address "addr" aligned by "alignment_in_bytes" if such
  // an address is below "end".  Return NULL otherwise.
  inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
                                                   HeapWord* end,
                                                   unsigned short alignment_in_bytes);
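
  // Worked example (following the stated contract): with addr == 0x1004,
  // alignment_in_bytes == 16 and end == 0x1020, the aligned address
  // 0x1010 is below "end" and is returned; with end == 0x100c the
  // aligned address is not below "end", so NULL is returned.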

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region).

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
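
  // Conceptually, the exported fields enable a bump-the-pointer fast path
  // like the following (illustrative; the real inlined code must use
  // atomic or thread-confined updates):
  //
  //   HeapWord* obj = *heap->top_addr();
  //   if (obj + size <= *heap->end_addr()) {
  //     *heap->top_addr() = obj + size;   // claim [obj, obj + size)
  //     return obj;
  //   }
  //   return NULL;  // caller falls back to mem_allocate()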

  // Some heaps may be in an unparsable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  virtual bool supports_tlab_allocation() const = 0;

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread *thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                       size_t size,
                                                       Metaspace::MetadataType mdtype);

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the CollectorPolicy for the heap
  virtual CollectorPolicy* collector_policy() const = 0;

  // Return the SoftRefPolicy for the heap.
  virtual SoftRefPolicy* soft_ref_policy() = 0;

  virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
  virtual GrowableArray<MemoryPool*> memory_pools() = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

  // NOTE! There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "block" instead of "object" since some heaps
  // may not pack objects densely; a block may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns its size.
  // "addr + size" is required to be the start of a new block, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
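
  // Together these support walking a parsable range of the heap
  // (illustrative sketch; 'bottom' and 'top' are assumed bounds, and
  // ensure_parsability() must have been called first):
  //
  //   for (HeapWord* cur = bottom; cur < top; cur += block_size(cur)) {
  //     if (block_is_obj(cur)) {
  //       cl->do_object(oop(cur));
  //     }
  //   }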

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Generate any dumps preceding or following a full gc
 private:
  void full_gc_dump(GCTimer* timer, bool before);

  virtual void initialize_serviceability() = 0;

 public:
  void pre_full_gc_dump(GCTimer* timer);
  void post_full_gc_dump(GCTimer* timer);

  virtual VirtualSpaceSummary create_heap_space_summary();
  GCHeapSummary create_heap_summary();

  MetaspaceSummary create_metaspace_summary();

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const {
    print_on(tty);
  }
  // Print more detailed heap information on the given
  // outputStream. The default behavior is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  virtual void print_on_error(outputStream* st) const;

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  // The default behavior is to call print_gc_threads_on() on tty.
  void print_gc_threads() {
    print_gc_threads_on(tty);
  }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  virtual void print_tracing_info() const = 0;

  void print_heap_before_gc();
  void print_heap_after_gc();

  // An object is scavengable if its location may move during a scavenge.
  // (A scavenge is a GC which is not a full GC.)
  virtual bool is_scavengable(oop obj) = 0;
  // Registering and unregistering an nmethod (compiled code) with the heap.
  // Override with specific mechanism for each specialized heap type.
  virtual void register_nmethod(nmethod* nm) {}
  virtual void unregister_nmethod(nmethod* nm) {}
  virtual void verify_nmethod(nmethod* nm) {}

  void trace_heap_before_gc(const GCTracer* gc_tracer);
  void trace_heap_after_gc(const GCTracer* gc_tracer);

  // Heap verification
  virtual void verify(VerifyOption option) = 0;

  // Return true if concurrent phase control (via
  // request_concurrent_phase_control) is supported by this collector.
  // The default implementation returns false.
  virtual bool supports_concurrent_phase_control() const;

  // Return a NULL-terminated array of concurrent phase names provided
  // by this collector.  Supports WhiteBox testing.  These are the
  // names recognized by request_concurrent_phase(). The default
  // implementation returns an array of one NULL element.
  virtual const char* const* concurrent_phases() const;

  // Request the collector enter the indicated concurrent phase, and
  // wait until it does so.  Supports WhiteBox testing.  Only one
  // request may be active at a time.  Phases are designated by name;
  // the set of names and their meaning is GC-specific.  Once the
  // requested phase has been reached, the collector will attempt to
  // avoid transitioning to a new phase until a new request is made.
  // [Note: A collector might not be able to remain in a given phase.
  // For example, a full collection might cancel an in-progress
  // concurrent collection.]
  //
  // Returns true when the phase is reached.  Returns false for an
  // unknown phase.  The default implementation returns false.
  virtual bool request_concurrent_phase(const char* phase);
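
  // Illustrative WhiteBox-style usage, with a hypothetical phase name
  // (valid names come from concurrent_phases()):
  //
  //   if (heap->supports_concurrent_phase_control()) {
  //     bool reached = heap->request_concurrent_phase("IDLE");
  //     // 'reached' is false iff the collector knows no such phase
  //   }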

  // Provides a thread pool to SafepointSynchronize to use
  // for parallel safepoint cleanup.
  // GCs that use a GC worker thread pool may want to share
  // it for use during safepoint cleanup. This is only possible
  // if the GC can pause and resume concurrent work (e.g. G1
  // concurrent marking) for an intermittent non-GC safepoint.
  // If this method returns NULL, SafepointSynchronize will
  // perform cleanup tasks serially in the VMThread.
  virtual WorkGang* get_safepoint_workers() { return NULL; }

  // Support for object pinning. This is used by the JNI Get*Critical()
  // and Release*Critical() family of functions. If supported, the GC
  // must guarantee that pinned objects never move.
  virtual bool supports_object_pinning() const;
  virtual oop pin_object(JavaThread* thread, oop obj);
  virtual void unpin_object(JavaThread* thread, oop obj);
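
  // Expected usage mirrors the JNI critical-section pattern (illustrative):
  //
  //   oop pinned = heap->pin_object(thread, obj);   // 'pinned' cannot move
  //   ... raw, GC-unsafe access to the object's payload ...
  //   heap->unpin_object(thread, pinned);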

  // Deduplicate the string, if the GC supports string deduplication.
  virtual void deduplicate_string(oop str);

  virtual bool is_oop(oop object) const;

  // Non-product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  bool promotion_should_fail(volatile size_t* count);
  bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  void reset_promotion_should_fail(volatile size_t* count);
  void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    _heap->set_gc_cause(_previous_cause);
  }
};
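
// Typical usage scopes the cause around a collection performed by a VM
// operation (illustrative; VM_MyCollect is a hypothetical operation):
//
//   void VM_MyCollect::doit() {
//     GCCauseSetter gcs(Universe::heap(), GCCause::_java_lang_system_gc);
//     Universe::heap()->do_full_collection(false /* clear_all_soft_refs */);
//   }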

#endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_HPP