/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_GENERATION_HPP
#define SHARE_VM_GC_SHARED_GENERATION_HPP

#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                        - abstract base class
// - DefNewGeneration                - allocation area (copy collected)
//   - ParNewGeneration              - a DefNewGeneration that is collected by
//                                     several threads
// - CardGeneration                  - abstract class adding offset array behavior
//   - TenuredGeneration             - tenured (old object) space (markSweepCompact)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                       (Detlefs-Printezis refinement of
//                                       Boehm-Demers-Schenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration
//
//   ParNewGeneration + ConcurrentMarkSweepGeneration
//

class DefNewGeneration;
class GCMemoryManager;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another.  It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
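
// Illustrative only (not part of this interface): given the layout above,
// a scratch list built up by contribute_scratch() below might be walked
// like this, with each block's "num_words" covering the two header words
// as well as the usable space:
//
//   size_t total_words = 0;
//   for (ScratchBlock* b = list; b != NULL; b = b->next) {
//     total_words += b->num_words;
//   }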

class Generation: public CHeapObj<mtGC> {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region; // for collectors that want to "remember" a value for
                               // used region at some specific point during collection.

  GCMemoryManager* _gc_manager;

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code. Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // ("Weak") Reference processing support
  SpanReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iteration; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

 public:
  // The set of possible generation kinds.
  enum Name {
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM32_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };
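
  // Illustrative only: since GenGrain is a power of two (1 << LogOfGenGrain),
  // a byte size can be rounded up to generation alignment with the usual
  // power-of-two trick:
  //
  //   size_t aligned = (byte_size + GenGrain - 1) & ~((size_t)GenGrain - 1);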

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(SpanReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true;  }
  virtual bool refs_discovery_is_mt()     const { return false; }

  // Space inquiries (results in bytes)
  size_t initial_size();
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion.  (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount are
  // likely to succeed without a promotion failure.
  // Promotion of the full amount is not guaranteed but
  // might be attempted in the worst case.
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;

  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC. Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void  save_used_region()   { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into the committed areas in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  // Returns "TRUE" iff "p" points into the reserved area of the generation.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }
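
  // Note the intended division of labor: is_in_reserved() is a cheap bounds
  // check against the reserved range, while is_in() may have to consult the
  // committed areas and can be expensive.  A sketch of the assert-only usage
  // suggested above:
  //
  //   assert(gen->is_in_reserved(p), "p outside reserved range");  // cheap
  //   assert(gen->is_in(p), "p outside committed areas");          // costly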

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }

  // Allocates and returns a block of the requested size, or returns "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;
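
  // A minimal caller sketch (illustrative; "gen", "word_size" and "is_tlab"
  // stand for whatever the caller has in hand).  Note that should_allocate()
  // also screens out word sizes whose byte count would overflow a size_t:
  //
  //   if (gen->should_allocate(word_size, is_tlab)) {
  //     HeapWord* obj = gen->par_allocate(word_size, is_tlab);  // locks internally
  //     if (obj == NULL) {
  //       // no space: fall back to collection and/or expansion
  //     }
  //   }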

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments "*top_addr()" with a CAS.) (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }
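
  // The allocation style referred to above is the classic lock-free
  // bump-the-pointer loop; an illustrative pseudocode sketch (not the
  // actual implementation) in terms of top_addr() and end_addr():
  //
  //   HeapWord* volatile* top = gen->top_addr();
  //   HeapWord** end = gen->end_addr();
  //   HeapWord* old_top;
  //   do {
  //     old_top = *top;
  //     if (old_top + word_size > *end) return NULL;   // no room; take slow path
  //   } while (!CAS(top, old_top, old_top + word_size));  // pseudocode CAS
  //   return old_top;  // [old_top, old_top + word_size) is now claimed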

  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t tlab_used() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation.  Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);
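
  // The copy protocol described above, as a pseudocode sketch (the real
  // implementations live in the concrete generations):
  //
  //   new_obj = allocate word_sz words in this generation (NULL => fail)
  //   copy the body of obj into new_obj
  //   store "m" as new_obj's mark word   // obj's own mark may be a forwardee
  //   store the klass pointer last       // so readers see a well-formed object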

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // Returns "true" iff collect() should subsequently be called on
  // this generation. See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Returns true if the collection is likely to be safely
  // completed. Even if this method returns true, a collection
  // is not guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }

  // Perform a garbage collection.
  // If "full" is true, attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block). If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {}

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {}

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {}

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again. The default is to do nothing.
  virtual void ensure_parsability() {}

  // Time (in ms) when we were last collected or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // Both _time_of_last_gc and now are set using a time source
    // that guarantees monotonically non-decreasing values provided
    // the underlying platform provides such a source. Since that is
    // not guaranteed on every platform, we still have to guard
    // against non-monotonicity.
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection. This method
  // updates those statistics. current_generation is the generation
  // that was most recently collected. This allows the generation to
  // decide what statistics are valid to collect. For example, the
  // generation can decide to gather the amount of promoted data if
  // the collection of the young generation has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(Generation* current_generation, bool full) {}

  // Mark sweep support phase 2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase 3
  virtual void adjust_pointers();
  // Mark sweep support phase 4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }

  // Support for CMS's rescan. In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors. A NULL value indicates to the client that
  // no data recording is expected by the provider. The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }
  virtual void sample_eden_chunk() {}

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {}

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function returns "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->apply" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization.  (The general
  // implementation of the _nv versions calls the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)             \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {    \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                      \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL
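
  // For instance, instantiated for a closure type such as ScanClosure with
  // an nv_suffix of "_nv", the macro above expands (roughly) to:
  //
  //   virtual void oop_since_save_marks_iterate_nv(ScanClosure* cl) {
  //     oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);
  //   }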

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  If
  // the target is not the requestor, no gc actions will be required
  // of the target.  The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion, say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list.  The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}
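
  // A sketch of what an overriding generation might do; "SomeGeneration"
  // and "find_unused_block" are hypothetical names, not part of this API:
  //
  //   void SomeGeneration::contribute_scratch(ScratchBlock*& list,
  //                                           Generation* requestor,
  //                                           size_t max_alloc_words) {
  //     if (requestor == this) return;       // never contribute to ourselves
  //     ScratchBlock* sb = find_unused_block(max_alloc_words);  // sets num_words
  //     if (sb != NULL) {
  //       sb->next = list;    // push onto the head of the list
  //       list = sb;          // "list" now heads the augmented list
  //     }
  //   }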

  // Give each generation an opportunity to clean up any
  // contributed scratch.
  virtual void reset_scratch() {}

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  // Reference Processing accessor
  SpanReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all safe objects in the generation, calling "cl.do_object" on
  // each.  An object is safe if its references point to other objects in
  // the heap.  This defaults to object_iterate() unless overridden.
  virtual void safe_object_iterate(ObjectClosure* cl);

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations. Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation.  [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved.  [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;
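
  // Together these support a simple block walk over a space (illustrative
  // sketch; "bottom" and "top" stand for the bounds of some space in the
  // generation):
  //
  //   HeapWord* p = bottom;
  //   while (p < top) {
  //     if (gen->block_is_obj(p)) {
  //       // p is the start of an object
  //     }
  //     p += gen->block_size(p);  // advance to the next block
  //   }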

  void print_heap_change(size_t prev_used) const;

  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify() = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
private:
  StatRecord _stat_record;
public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }

  GCMemoryManager* gc_manager() const {
    assert(_gc_manager != NULL, "not initialized yet");
    return _gc_manager;
  }

  void set_gc_manager(GCMemoryManager* gc_manager) {
    _gc_manager = gc_manager;
  }

};

#endif // SHARE_VM_GC_SHARED_GENERATION_HPP