1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/concurrentMark.hpp"
  29 #include "gc/g1/evacuationInfo.hpp"
  30 #include "gc/g1/g1AllocationContext.hpp"
  31 #include "gc/g1/g1BiasedArray.hpp"
  32 #include "gc/g1/g1CollectorState.hpp"
  33 #include "gc/g1/g1HRPrinter.hpp"
  34 #include "gc/g1/g1InCSetState.hpp"
  35 #include "gc/g1/g1MonitoringSupport.hpp"
  36 #include "gc/g1/g1EvacFailure.hpp"
  37 #include "gc/g1/g1EvacStats.hpp"
  38 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  39 #include "gc/g1/g1YCTypes.hpp"
  40 #include "gc/g1/hSpaceCounters.hpp"
  41 #include "gc/g1/heapRegionManager.hpp"
  42 #include "gc/g1/heapRegionSet.hpp"
  43 #include "gc/g1/youngList.hpp"
  44 #include "gc/shared/barrierSet.hpp"
  45 #include "gc/shared/collectedHeap.hpp"
  46 #include "gc/shared/plab.hpp"
  47 #include "memory/memRegion.hpp"
  48 #include "utilities/stack.hpp"
  49 
  50 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  51 // It uses the "Garbage First" heap organization and algorithm, which
  52 // may combine concurrent marking with parallel, incremental compaction of
  53 // heap subsets that will yield large amounts of garbage.
  54 
  55 // Forward declarations
  56 class HeapRegion;
  57 class HRRSCleanupTask;
  58 class GenerationSpec;
  59 class OopsInHeapRegionClosure;
  60 class G1ParScanThreadState;
  61 class G1ParScanThreadStateSet;
  62 class G1KlassScanClosure;
  64 class ObjectClosure;
  65 class SpaceClosure;
  66 class CompactibleSpaceClosure;
  67 class Space;
  68 class G1CollectorPolicy;
  69 class G1RemSet;
  70 class HeapRegionRemSetIterator;
  71 class ConcurrentMark;
  72 class ConcurrentMarkThread;
  73 class ConcurrentG1Refine;
  74 class ConcurrentGCTimer;
  75 class GenerationCounters;
  76 class STWGCTimer;
  77 class G1NewTracer;
  78 class G1OldTracer;
  79 class EvacuationFailedInfo;
  80 class nmethod;
  81 class Ticks;
  82 class WorkGang;
  83 class G1Allocator;
  84 class G1ArchiveAllocator;
  85 
  86 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
  87 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
  88 
  89 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
  90 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  91 
  92 // The G1 STW is-alive closure.
  93 // An instance is embedded into the G1CH and used as the
  94 // (optional) _is_alive_non_header closure in the STW
  95 // reference processor. It is also used extensively during
  96 // reference processing in STW evacuation pauses.
  97 class G1STWIsAliveClosure: public BoolObjectClosure {
  98   G1CollectedHeap* _g1;
  99 public:
 100   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 101   bool do_object_b(oop p);
 102 };
 103 
 104 class RefineCardTableEntryClosure;
 105 
 106 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 107  private:
 108   void reset_from_card_cache(uint start_idx, size_t num_regions);
 109  public:
 110   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 111 };
 112 
 113 class G1CollectedHeap : public CollectedHeap {
 114   friend class VM_CollectForMetadataAllocation;
 115   friend class VM_G1CollectForAllocation;
 116   friend class VM_G1CollectFull;
 117   friend class VM_G1IncCollectionPause;
 118   friend class VMStructs;
 119   friend class MutatorAllocRegion;
 120   friend class G1GCAllocRegion;
 121 
 122   // Closures used in implementation.
 123   friend class G1ParScanThreadState;
 124   friend class G1ParScanThreadStateSet;
 125   friend class G1ParTask;
 126   friend class G1PLABAllocator;
 127   friend class G1PrepareCompactClosure;
 128 
 129   // Other related classes.
 130   friend class HeapRegionClaimer;
 131 
 132   // Testing classes.
 133   friend class G1CheckCSetFastTableClosure;
 134 
 135 private:
 136   WorkGang* _workers;
 137 
 138   static size_t _humongous_object_threshold_in_words;
 139 
 140   // The secondary free list which contains regions that have been
 141   // freed up during the cleanup process. This will be appended to
 142   // the master free list when appropriate.
 143   FreeRegionList _secondary_free_list;
 144 
 145   // It keeps track of the old regions.
 146   HeapRegionSet _old_set;
 147 
 148   // It keeps track of the humongous regions.
 149   HeapRegionSet _humongous_set;
 150 
 151   void eagerly_reclaim_humongous_regions();
 152 
 153   // The number of regions we could create by expansion.
 154   uint _expansion_regions;
 155 
 156   // The block offset table for the G1 heap.
 157   G1BlockOffsetTable* _bot;
 158 
 159   // Tears down the region sets / lists so that they are empty and the
 160   // regions on the heap do not belong to a region set / list. The
 161   // only exception is the humongous set which we leave unaltered. If
 162   // free_list_only is true, it will only tear down the master free
 163   // list. It is called before a Full GC (free_list_only == false) or
 164   // before heap shrinking (free_list_only == true).
 165   void tear_down_region_sets(bool free_list_only);
 166 
 167   // Rebuilds the region sets / lists so that they are repopulated to
 168   // reflect the contents of the heap. The only exception is the
 169   // humongous set which was not torn down in the first place. If
 170   // free_list_only is true, it will only rebuild the master free
 171   // list. It is called after a Full GC (free_list_only == false) or
 172   // after heap shrinking (free_list_only == true).
 173   void rebuild_region_sets(bool free_list_only);
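
       // Illustrative sketch (not the actual call sites): the two methods above
       // are meant to be used as a pair, with the same free_list_only value,
       // bracketing an operation that invalidates the region sets:
       //
       //   tear_down_region_sets(false /* free_list_only */);  // before a Full GC
       //   // ... do the full collection ...
       //   rebuild_region_sets(false /* free_list_only */);    // after the Full GC
       //
       //   tear_down_region_sets(true /* free_list_only */);   // before shrinking
       //   // ... shrink the heap ...
       //   rebuild_region_sets(true /* free_list_only */);     // after shrinking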
 174 
 175   // Callback for region mapping changed events.
 176   G1RegionMappingChangedListener _listener;
 177 
 178   // The sequence of all heap regions in the heap.
 179   HeapRegionManager _hrm;
 180 
 181   // Manages all allocations within regions except humongous object allocations.
 182   G1Allocator* _allocator;
 183 
 184   // Outside of GC pauses, the number of bytes used in all regions other
 185   // than the current allocation region(s).
 186   size_t _summary_bytes_used;
 187 
 188   void increase_used(size_t bytes);
 189   void decrease_used(size_t bytes);
 190 
 191   void set_used(size_t bytes);
 192 
 193   // Class that handles archive allocation ranges.
 194   G1ArchiveAllocator* _archive_allocator;
 195 
 196   // Statistics for each allocation context
 197   AllocationContextStats _allocation_context_stats;
 198 
 199   // GC allocation statistics policy for survivors.
 200   G1EvacStats _survivor_evac_stats;
 201 
 202   // GC allocation statistics policy for tenured objects.
 203   G1EvacStats _old_evac_stats;
 204 
 205   // It specifies whether we should attempt to expand the heap after a
 206   // region allocation failure. If heap expansion fails we set this to
 207   // false so that we don't re-attempt the heap expansion (it's likely
 208   // that subsequent expansion attempts will also fail if one fails).
 209   // Currently, it is only consulted during GC and it's reset at the
 210   // start of each GC.
 211   bool _expand_heap_after_alloc_failure;
 212 
 213   // Helper for monitoring and management support.
 214   G1MonitoringSupport* _g1mm;
 215 
 216   // Records whether the region at the given index is (still) a
 217   // candidate for eager reclaim.  Only valid for humongous start
 218   // regions; other regions have unspecified values.  Humongous start
 219   // regions are initialized at start of collection pause, with
 220   // candidates removed from the set as they are found reachable from
 221   // roots or the young generation.
 222   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 223    protected:
 224     bool default_value() const { return false; }
 225    public:
 226     void clear() { G1BiasedMappedArray<bool>::clear(); }
 227     void set_candidate(uint region, bool value) {
 228       set_by_index(region, value);
 229     }
 230     bool is_candidate(uint region) {
 231       return get_by_index(region);
 232     }
 233   };
 234 
 235   HumongousReclaimCandidates _humongous_reclaim_candidates;
 236   // Stores whether during humongous object registration we found candidate regions.
 237   // If not, we can skip a few steps.
 238   bool _has_humongous_reclaim_candidates;
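
       // Illustrative sketch of how the candidate table above might be consulted
       // at the end of a pause (hr and region_is_reachable() are hypothetical;
       // see eagerly_reclaim_humongous_regions() above for the real logic):
       //
       //   uint region = hr->hrm_index();      // hr: a humongous start region
       //   if (_humongous_reclaim_candidates.is_candidate(region) &&
       //       !region_is_reachable(hr)) {
       //     // reclaim the humongous object that starts in this region
       //   } else {
       //     _humongous_reclaim_candidates.set_candidate(region, false);
       //   }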
 239 
 240   volatile unsigned _gc_time_stamp;
 241 
 242   G1HRPrinter _hr_printer;
 243 
 244   // It decides whether an explicit GC should start a concurrent cycle
 245   // instead of doing a STW GC. Currently, a concurrent cycle is
 246   // explicitly started if:
 247   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
 248   // (b) cause == _g1_humongous_allocation,
 249   // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
 250   // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
 251   // (e) cause == _update_allocation_context_stats_inc, or
 252   // (f) cause == _wb_conc_mark.
 253   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 254 
 255   // Indicates whether we are in young or mixed GC mode.
 256   G1CollectorState _collector_state;
 257 
 258   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 259   // concurrent cycles) we have started.
 260   volatile uint _old_marking_cycles_started;
 261 
 262   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 263   // concurrent cycles) we have completed.
 264   volatile uint _old_marking_cycles_completed;
 265 
 266   bool _heap_summary_sent;
 267 
 268   // This is a non-product method that is helpful for testing. It is
 269   // called at the end of a GC and artificially expands the heap by
 270   // allocating a number of dead regions. This way we can induce very
 271   // frequent marking cycles and stress the cleanup / concurrent
 272   // cleanup code more (as all the regions that will be allocated by
 273   // this method will be found dead by the marking cycle).
 274   void allocate_dummy_regions() PRODUCT_RETURN;
 275 
 276   // Clear RSets after a compaction. It also resets the GC time stamps.
 277   void clear_rsets_post_compaction();
 278 
 279   // If the HR printer is active, dump the state of the regions in the
 280   // heap after a compaction.
 281   void print_hrm_post_compaction();
 282 
 283   // Create a memory mapper for auxiliary data structures of the given size and
 284   // translation factor.
 285   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 286                                                          size_t size,
 287                                                          size_t translation_factor);
 288 
 289   double verify(bool guard, const char* msg);
 290   void verify_before_gc();
 291   void verify_after_gc();
 292 
 293   void log_gc_footer(jlong pause_time_counter);
 294 
 295   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 296 
 297   void process_weak_jni_handles();
 298 
 299   // These are macros so that, if the assert fires, we get the correct
 300   // line number, file, etc.
 301 
 302 #define heap_locking_asserts_params(_extra_message_)                          \
 303   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 304   (_extra_message_),                                                          \
 305   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 306   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 307   BOOL_TO_STR(Thread::current()->is_VM_thread())
 308 
 309 #define assert_heap_locked()                                                  \
 310   do {                                                                        \
 311     assert(Heap_lock->owned_by_self(),                                        \
 312            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 313   } while (0)
 314 
 315 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
 316   do {                                                                        \
 317     assert(Heap_lock->owned_by_self() ||                                      \
 318            (SafepointSynchronize::is_at_safepoint() &&                        \
 319              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
 320            heap_locking_asserts_params("should be holding the Heap_lock or "  \
 321                                         "should be at a safepoint"));         \
 322   } while (0)
 323 
 324 #define assert_heap_locked_and_not_at_safepoint()                             \
 325   do {                                                                        \
 326     assert(Heap_lock->owned_by_self() &&                                      \
 327                                     !SafepointSynchronize::is_at_safepoint(), \
 328           heap_locking_asserts_params("should be holding the Heap_lock and "  \
 329                                        "should not be at a safepoint"));      \
 330   } while (0)
 331 
 332 #define assert_heap_not_locked()                                              \
 333   do {                                                                        \
 334     assert(!Heap_lock->owned_by_self(),                                       \
 335         heap_locking_asserts_params("should not be holding the Heap_lock"));  \
 336   } while (0)
 337 
 338 #define assert_heap_not_locked_and_not_at_safepoint()                         \
 339   do {                                                                        \
 340     assert(!Heap_lock->owned_by_self() &&                                     \
 341                                     !SafepointSynchronize::is_at_safepoint(), \
 342       heap_locking_asserts_params("should not be holding the Heap_lock and "  \
 343                                    "should not be at a safepoint"));          \
 344   } while (0)
 345 
 346 #define assert_at_safepoint(_should_be_vm_thread_)                            \
 347   do {                                                                        \
 348     assert(SafepointSynchronize::is_at_safepoint() &&                         \
 349               ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
 350            heap_locking_asserts_params("should be at a safepoint"));          \
 351   } while (0)
 352 
 353 #define assert_not_at_safepoint()                                             \
 354   do {                                                                        \
 355     assert(!SafepointSynchronize::is_at_safepoint(),                          \
 356            heap_locking_asserts_params("should not be at a safepoint"));      \
 357   } while (0)
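
       // Usage sketch for the asserts above (illustrative only; these exact
       // snippets do not appear verbatim elsewhere in this file):
       //
       //   HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size, ...) {
       //     assert_heap_not_locked_and_not_at_safepoint();
       //     ...
       //   }
       //
       //   bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       //     assert_at_safepoint(true /* should_be_vm_thread */);
       //     ...
       //   }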
 358 
 359 protected:
 360 
 361   // The young region list.
 362   YoungList*  _young_list;
 363 
 364   // The current policy object for the collector.
 365   G1CollectorPolicy* _g1_policy;
 366 
 367   // This is the second level of trying to allocate a new region. If
 368   // new_region() didn't find a region on the free_list, this call will
 369   // check whether there's anything available on the
 370   // secondary_free_list and/or wait for more regions to appear on
 371   // that list, if _free_regions_coming is set.
 372   HeapRegion* new_region_try_secondary_free_list(bool is_old);
 373 
 374   // Try to allocate a single non-humongous HeapRegion sufficient for
 375   // an allocation of the given word_size. If do_expand is true,
 376   // attempt to expand the heap if necessary to satisfy the allocation
 377   // request. If the region is to be used as an old region or for a
 378   // humongous object, set is_old to true; otherwise, set it to false.
 379   HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
 380 
 381   // Initialize a contiguous set of free regions of length num_regions
 382   // and starting at index first so that they appear as a single
 383   // humongous region.
 384   HeapWord* humongous_obj_allocate_initialize_regions(uint first,
 385                                                       uint num_regions,
 386                                                       size_t word_size,
 387                                                       AllocationContext_t context);
 388 
 389   // Attempt to allocate a humongous object of the given size. Return
 390   // NULL if unsuccessful.
 391   HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
 392 
 393   // The following two methods, allocate_new_tlab() and
 394   // mem_allocate(), are the two main entry points from the runtime
 395   // into the G1's allocation routines. They have the following
 396   // assumptions:
 397   //
 398   // * They should both be called outside safepoints.
 399   //
 400   // * They should both be called without holding the Heap_lock.
 401   //
 402   // * All allocation requests for new TLABs should go to
 403   //   allocate_new_tlab().
 404   //
 405   // * All non-TLAB allocation requests should go to mem_allocate().
 406   //
 407   // * If either call cannot satisfy the allocation request using the
 408   //   current allocating region, they will try to get a new one. If
 409   //   this fails, they will attempt to do an evacuation pause and
 410   //   retry the allocation.
 411   //
 412   // * If all allocation attempts fail, even after trying to schedule
 413   //   an evacuation pause, allocate_new_tlab() will return NULL,
 414   //   whereas mem_allocate() will attempt a heap expansion and/or
 415   //   schedule a Full GC.
 416   //
 417   // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
 418   //   should never be called with word_size being humongous. All
 419   //   humongous allocation requests should go to mem_allocate() which
 420   //   will satisfy them with a special path.
 421 
 422   virtual HeapWord* allocate_new_tlab(size_t word_size);
 423 
 424   virtual HeapWord* mem_allocate(size_t word_size,
 425                                  bool*  gc_overhead_limit_was_exceeded);
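
       // Illustrative sketch of the routing described above (a simplification,
       // not the actual runtime code; is_humongous(word_size) stands in for the
       // real threshold check and is hypothetical here):
       //
       //   if (request_is_for_a_new_tlab) {
       //     result = allocate_new_tlab(word_size);   // word_size never humongous
       //   } else {
       //     result = mem_allocate(word_size, &gc_overhead_limit_was_exceeded);
       //     // mem_allocate() takes a special path when is_humongous(word_size).
       //   }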
 426 
 427   // The following three methods take a gc_count_before_ret
 428   // parameter which is used to return the GC count if the method
 429   // returns NULL. Given that we are required to read the GC count
 430   // while holding the Heap_lock, and these paths will take the
 431   // Heap_lock at some point, it's easier to get them to read the GC
 432   // count while holding the Heap_lock before they return NULL instead
 433   // of the caller (namely: mem_allocate()) having to also take the
 434   // Heap_lock just to read the GC count.
 435 
 436   // First-level mutator allocation attempt: try to allocate out of
 437   // the mutator alloc region without taking the Heap_lock. This
 438   // should only be used for non-humongous allocations.
 439   inline HeapWord* attempt_allocation(size_t word_size,
 440                                       uint* gc_count_before_ret,
 441                                       uint* gclocker_retry_count_ret);
 442 
 443   // Second-level mutator allocation attempt: take the Heap_lock and
 444   // retry the allocation attempt, potentially scheduling a GC
 445   // pause. This should only be used for non-humongous allocations.
 446   HeapWord* attempt_allocation_slow(size_t word_size,
 447                                     AllocationContext_t context,
 448                                     uint* gc_count_before_ret,
 449                                     uint* gclocker_retry_count_ret);
 450 
 451   // Takes the Heap_lock and attempts a humongous allocation. It can
 452   // potentially schedule a GC pause.
 453   HeapWord* attempt_allocation_humongous(size_t word_size,
 454                                          uint* gc_count_before_ret,
 455                                          uint* gclocker_retry_count_ret);
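
       // Caller-side sketch (illustrative; not the actual mem_allocate() code) of
       // why the GC count is returned through gc_count_before_ret:
       //
       //   uint gc_count_before = 0;
       //   uint gclocker_retry_count = 0;
       //   HeapWord* result = attempt_allocation(word_size, &gc_count_before,
       //                                         &gclocker_retry_count);
       //   if (result == NULL) {
       //     // gc_count_before was read while the failed attempt held the
       //     // Heap_lock, so it can be passed to do_collection_pause() without
       //     // taking the lock again just to read the count.
       //   }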
 456 
 457   // Allocation attempt that should be called during safepoints (e.g.,
 458   // at the end of a successful GC). expect_null_mutator_alloc_region
 459   // specifies whether the mutator alloc region is expected to be NULL
 460   // or not.
 461   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 462                                             AllocationContext_t context,
 463                                             bool expect_null_mutator_alloc_region);
 464 
 465   // These methods are the "callbacks" from the G1AllocRegion class.
 466 
 467   // For mutator alloc regions.
 468   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 469   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 470                                    size_t allocated_bytes);
 471 
 472   // For GC alloc regions.
 473   HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
 474                                   InCSetState dest);
 475   void retire_gc_alloc_region(HeapRegion* alloc_region,
 476                               size_t allocated_bytes, InCSetState dest);
 477 
 478   // - if explicit_gc is true, the GC is for a System.gc() etc.,
 479   //   otherwise it's for a failed allocation.
 480   // - if clear_all_soft_refs is true, all soft references should be
 481   //   cleared during the GC.
 482   // - it returns false if it is unable to do the collection due to the
 483   //   GC locker being active, true otherwise.
 484   bool do_full_collection(bool explicit_gc,
 485                           bool clear_all_soft_refs);
 486 
 487   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
 488   virtual void do_full_collection(bool clear_all_soft_refs);
 489 
 490   // Resize the heap if necessary after a full collection.
 491   void resize_if_necessary_after_full_collection();
 492 
 493   // Callback from VM_G1CollectForAllocation operation.
 494   // This function does everything necessary/possible to satisfy a
 495   // failed allocation request (including collection, expansion, etc.)
 496   HeapWord* satisfy_failed_allocation(size_t word_size,
 497                                       AllocationContext_t context,
 498                                       bool* succeeded);
 499 private:
 500   // Helper method for satisfy_failed_allocation()
 501   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
 502                                              AllocationContext_t context,
 503                                              bool do_gc,
 504                                              bool clear_all_soft_refs,
 505                                              bool expect_null_mutator_alloc_region,
 506                                              bool* gc_succeeded);
 507 
 508 protected:
 509   // Attempts to expand the heap sufficiently
 510   // to support an allocation of the given "word_size". If
 511   // successful, performs the allocation and returns the address of the
 512   // allocated block, or else "NULL".
 513   HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 514 
 515   // Process any reference objects discovered during
 516   // an incremental evacuation pause.
 517   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 518 
 519   // Enqueue any remaining discovered references
 520   // after processing.
 521   void enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 522 
 523 public:
 524   WorkGang* workers() const { return _workers; }
 525 
 526   G1Allocator* allocator() {
 527     return _allocator;
 528   }
 529 
 530   G1MonitoringSupport* g1mm() {
 531     assert(_g1mm != NULL, "should have been initialized");
 532     return _g1mm;
 533   }
 534 
 535   // Expand the garbage-first heap by at least the given size (in bytes!).
 536   // Returns true if the heap was expanded by the requested amount;
 537   // false otherwise.
 538   // (Rounds up to a HeapRegion boundary.)
 539   bool expand(size_t expand_bytes, double* expand_time_ms = NULL);
 540 
 541   // Returns the PLAB statistics for a given destination.
 542   inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
 543 
 544   // Determines PLAB size for a given destination.
 545   inline size_t desired_plab_sz(InCSetState dest);
 546 
 547   inline AllocationContextStats& allocation_context_stats();
 548 
 549   // Do anything common to GCs.
 550   void gc_prologue(bool full);
 551   void gc_epilogue(bool full);
 552 
 553   // Modify the reclaim candidate set and test for presence.
 554   // These are only valid for starts_humongous regions.
 555   inline void set_humongous_reclaim_candidate(uint region, bool value);
 556   inline bool is_humongous_reclaim_candidate(uint region);
 557 
 558   // Remove from the reclaim candidate set.  Also remove from the
 559   // collection set so that later encounters avoid the slow path.
 560   inline void set_humongous_is_live(oop obj);
 561 
 562   // Register the given region to be part of the collection set.
 563   inline void register_humongous_region_with_cset(uint index);
 564   // Register regions with humongous objects (actually on the start region) in
 565   // the in_cset_fast_test table.
 566   void register_humongous_regions_with_cset();
 567   // We register a region with the fast "in collection set" test. We
 568   // simply set to true the array slot corresponding to this region.
 569   void register_young_region_with_cset(HeapRegion* r) {
 570     _in_cset_fast_test.set_in_young(r->hrm_index());
 571   }
 572   void register_old_region_with_cset(HeapRegion* r) {
 573     _in_cset_fast_test.set_in_old(r->hrm_index());
 574   }
 575   inline void register_ext_region_with_cset(HeapRegion* r) {
 576     _in_cset_fast_test.set_ext(r->hrm_index());
 577   }
 578   void clear_in_cset(const HeapRegion* hr) {
 579     _in_cset_fast_test.clear(hr);
 580   }
 581 
 582   void clear_cset_fast_test() {
 583     _in_cset_fast_test.clear();
 584   }
 585 
 586   bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
 587 
 588   // This is called at the start of either a concurrent cycle or a Full
 589   // GC to update the number of old marking cycles started.
 590   void increment_old_marking_cycles_started();
 591 
 592   // This is called at the end of either a concurrent cycle or a Full
 593   // GC to update the number of old marking cycles completed. Those two
 594   // can happen in a nested fashion, i.e., we start a concurrent
 595   // cycle, a Full GC happens half-way through it which ends first,
 596   // and then the cycle notices that a Full GC happened and ends
 597   // too. The concurrent parameter is a boolean to help us do a bit
 598   // tighter consistency checking in the method. If concurrent is
 599   // false, the caller is the inner caller in the nesting (i.e., the
 600   // Full GC). If concurrent is true, the caller is the outer caller
 601   // in this nesting (i.e., the concurrent cycle). Further nesting is
 602   // not currently supported. The end of this call also notifies
 603   // the FullGCCount_lock in case a Java thread is waiting for a full
 604   // GC to happen (e.g., it called System.gc() with
 605   // +ExplicitGCInvokesConcurrent).
 606   void increment_old_marking_cycles_completed(bool concurrent);
 607 
 608   uint old_marking_cycles_completed() {
 609     return _old_marking_cycles_completed;
 610   }
 611 
 612   void register_concurrent_cycle_start(const Ticks& start_time);
 613   void register_concurrent_cycle_end();
 614   void trace_heap_after_concurrent_cycle();
 615 
 616   G1HRPrinter* hr_printer() { return &_hr_printer; }
 617 
 618   // Allocates a new heap region instance.
 619   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
 620 
 621   // Allocate the highest free region in the reserved heap. This will commit
 622   // regions as necessary.
 623   HeapRegion* alloc_highest_free_region();
 624 
 625   // Frees a non-humongous region by initializing its contents and
 626   // adding it to the free list that's passed as a parameter (this is
 627   // usually a local list which will be appended to the master free
 628   // list later). The used bytes of freed regions are accumulated in
 629   // pre_used. If par is true, the region's RSet will not be freed
 630   // up. The assumption is that this will be done later.
 631   // The locked parameter indicates if the caller has already taken
 632   // care of proper synchronization. This may allow some optimizations.
 633   void free_region(HeapRegion* hr,
 634                    FreeRegionList* free_list,
 635                    bool par,
 636                    bool locked = false);
 637 
 638   // It dirties the cards that cover the block so that the post
 639   // write barrier never queues anything when updating objects on this
 640   // block. It is assumed (and in fact we assert) that the block
 641   // belongs to a young region.
 642   inline void dirty_young_block(HeapWord* start, size_t word_size);
 643 
 644   // Frees a humongous region by collapsing it into individual regions
 645   // and calling free_region() for each of them. The freed regions
 646   // will be added to the free list that's passed as a parameter (this
 647   // is usually a local list which will be appended to the master free
 648   // list later). The used bytes of freed regions are accumulated in
 649   // pre_used. If par is true, the region's RSet will not be freed
 650   // up. The assumption is that this will be done later.
 651   void free_humongous_region(HeapRegion* hr,
 652                              FreeRegionList* free_list,
 653                              bool par);
 654 
 655   // Facility for allocating in 'archive' regions in high heap memory and
 656   // recording the allocated ranges. These should all be called from the
 657   // VM thread at safepoints, without the heap lock held. They can be used
 658   // to create and archive a set of heap regions which can be mapped at the
 659   // same fixed addresses in a subsequent JVM invocation.
 660   void begin_archive_alloc_range();
 661 
 662   // Check if the requested size would be too large for an archive allocation.
 663   bool is_archive_alloc_too_large(size_t word_size);
 664 
 665   // Allocate memory of the requested size from the archive region. This will
 666   // return NULL if the size is too large or if no memory is available. It
 667   // does not trigger a garbage collection.
 668   HeapWord* archive_mem_allocate(size_t word_size);
 669 
 670   // Optionally aligns the end address and returns the allocated ranges in
 671   // an array of MemRegions in order of ascending addresses.
 672   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 673                                size_t end_alignment_in_bytes = 0);
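
       // Illustrative sketch of the archive allocation protocol described above
       // (VM thread, at a safepoint; resource and error handling omitted):
       //
       //   GrowableArray<MemRegion> ranges;
       //   begin_archive_alloc_range();
       //   if (!is_archive_alloc_too_large(word_size)) {
       //     HeapWord* p = archive_mem_allocate(word_size);
       //     // ... copy the data to be archived into [p, p + word_size) ...
       //   }
       //   end_archive_alloc_range(&ranges, end_alignment_in_bytes);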
 674 
 675   // Facility for allocating a fixed range within the heap and marking
 676   // the containing regions as 'archive'. For use at JVM init time, when the
 677   // caller may mmap archived heap data at the specified range(s).
 678   // Verify that the MemRegions specified in the argument array are within the
 679   // reserved heap.
 680   bool check_archive_addresses(MemRegion* range, size_t count);
 681 
 682   // Commit the appropriate G1 regions containing the specified MemRegions
 683   // and mark them as 'archive' regions. The regions in the array must be
 684   // non-overlapping and in order of ascending address.
 685   bool alloc_archive_regions(MemRegion* range, size_t count);
 686 
 687   // Insert any required filler objects in the G1 regions around the specified
 688   // ranges to make the regions parseable. This must be called after
 689   // alloc_archive_regions, and after class loading has occurred.
 690   void fill_archive_regions(MemRegion* range, size_t count);
 691 
 692   // For each of the specified MemRegions, uncommit the containing G1 regions
 693   // which had been allocated by alloc_archive_regions. This should be called
 694   // rather than fill_archive_regions at JVM init time if the archive file
 695   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 696   void dealloc_archive_regions(MemRegion* range, size_t count);
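
       // Illustrative sketch of the mapping-time protocol described above
       // (ranges/count describe previously archived regions; map_archived_data()
       // is a hypothetical stand-in for the actual mapping step):
       //
       //   if (check_archive_addresses(ranges, count) &&
       //       alloc_archive_regions(ranges, count)) {
       //     if (map_archived_data(ranges, count)) {
       //       // after class loading has occurred:
       //       fill_archive_regions(ranges, count);
       //     } else {
       //       dealloc_archive_regions(ranges, count);
       //     }
       //   }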
 697 
 698 protected:
 699 
 700   // Shrink the garbage-first heap by at most the given size (in bytes!).
 701   // (Rounds down to a HeapRegion boundary.)
 702   virtual void shrink(size_t expand_bytes);
 703   void shrink_helper(size_t expand_bytes);
 704 
 705   #if TASKQUEUE_STATS
 706   static void print_taskqueue_stats_hdr(outputStream* const st);
 707   void print_taskqueue_stats() const;
 708   void reset_taskqueue_stats();
 709   #endif // TASKQUEUE_STATS
 710 
 711   // Schedule the VM operation that will do an evacuation pause to
 712   // satisfy an allocation request of word_size. *succeeded will
 713   // return whether the VM operation was successful (it did do an
 714   // evacuation pause) or not (another thread beat us to it or the GC
 715   // locker was active). Given that we should not be holding the
 716   // Heap_lock when we enter this method, we will pass the
 717   // gc_count_before (i.e., total_collections()) as a parameter since
 718   // it has to be read while holding the Heap_lock. Currently, both
 719   // methods that call do_collection_pause() release the Heap_lock
 720   // before the call, so it's easy to read gc_count_before just before.
 721   HeapWord* do_collection_pause(size_t         word_size,
 722                                 uint           gc_count_before,
 723                                 bool*          succeeded,
 724                                 GCCause::Cause gc_cause);
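
       // Illustrative caller sketch for the contract described above (the real
       // callers are the allocation slow paths in this class):
       //
       //   uint gc_count_before;
       //   {
       //     MutexLockerEx x(Heap_lock);
       //     gc_count_before = total_collections();  // must be read under the lock
       //   }
       //   // Heap_lock has been released before scheduling the pause.
       //   bool succeeded;
       //   HeapWord* result = do_collection_pause(word_size, gc_count_before,
       //                                          &succeeded, gc_cause);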
 725 
 726   void wait_for_root_region_scanning();
 727 
 728   // The guts of the incremental collection pause, executed by the vm
 729   // thread. It returns false if it is unable to do the collection due
 730   // to the GC locker being active, true otherwise
 731   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 732 
 733   // Actually do the work of evacuating the collection set.
 734   virtual void evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states);
 735 
 736   void pre_evacuate_collection_set();
 737   void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 738 
 739   // Print the header for the per-thread termination statistics.
 740   static void print_termination_stats_hdr();
 741   // Print actual per-thread termination statistics.
 742   void print_termination_stats(uint worker_id,
 743                                double elapsed_ms,
 744                                double strong_roots_ms,
 745                                double term_ms,
 746                                size_t term_attempts,
 747                                size_t alloc_buffer_waste,
 748                                size_t undo_waste) const;
 749   // Update object copying statistics.
 750   void record_obj_copy_mem_stats();
 751 
 752   // The g1 remembered set of the heap.
 753   G1RemSet* _g1_rem_set;
 754 
 755   // A set of cards that cover the objects for which the Rsets should be updated
 756   // concurrently after the collection.
 757   DirtyCardQueueSet _dirty_card_queue_set;
 758 
 759   // The closure used to refine a single card.
 760   RefineCardTableEntryClosure* _refine_cte_cl;
 761 
 762   // After a collection pause, make the regions in the CS into free
 763   // regions.
 764   void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 765 
 766   // Abandon the current collection set without recording policy
 767   // statistics or updating free lists.
 768   void abandon_collection_set(HeapRegion* cs_head);
 769 
 770   // The concurrent marker (and the thread it runs in.)
 771   ConcurrentMark* _cm;
 772   ConcurrentMarkThread* _cmThread;
 773 
 774   // The concurrent refiner.
 775   ConcurrentG1Refine* _cg1r;
 776 
 777   // The parallel task queues
 778   RefToScanQueueSet *_task_queues;
 779 
 780   // True iff an evacuation has failed in the current collection.
 781   bool _evacuation_failed;
 782 
 783   EvacuationFailedInfo* _evacuation_failed_info_array;
 784 
 785   // Failed evacuations cause some logical from-space objects to have
 786   // forwarding pointers to themselves.  Reset them.
 787   void remove_self_forwarding_pointers();
 788 
 789   // Restore the preserved mark words for objects with self-forwarding pointers.
 790   void restore_preserved_marks();
 791 
 792   // Restore the objects in the regions in the collection set after an
 793   // evacuation failure.
 794   void restore_after_evac_failure();
 795 
 796   // Stores marks with the corresponding oop that we need to preserve during evacuation
 797   // failure.
 798   OopAndMarkOopStack*  _preserved_objs;
 799 
 800   // Preserve the mark of "obj", if necessary, in preparation for its mark
 801   // word being overwritten with a self-forwarding-pointer.
 802   void preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m);
 803 
 804 #ifndef PRODUCT
 805   // Support for forcing evacuation failures. Analogous to
 806   // PromotionFailureALot for the other collectors.
 807 
 808   // Records whether G1EvacuationFailureALot should be in effect
 809   // for the current GC
 810   bool _evacuation_failure_alot_for_current_gc;
 811 
 812   // Used to record the GC number for interval checking when
 813   // determining whether G1EvacuationFailureALot is in effect
 814   // for the current GC.
 815   size_t _evacuation_failure_alot_gc_number;
 816 
 817   // Count of the number of evacuations between failures.
 818   volatile size_t _evacuation_failure_alot_count;
 819 
 820   // Set whether G1EvacuationFailureALot should be in effect
 821   // for the current GC (based upon the type of GC and which
 822   // command line flags are set).
 823   inline bool evacuation_failure_alot_for_gc_type(bool gcs_are_young,
 824                                                   bool during_initial_mark,
 825                                                   bool during_marking);
 826 
 827   inline void set_evacuation_failure_alot_for_current_gc();
 828 
 829   // Return true if it's time to cause an evacuation failure.
 830   inline bool evacuation_should_fail();
 831 
 832   // Reset the G1EvacuationFailureALot counters.  Should be called at
 833   // the end of an evacuation pause in which an evacuation failure occurred.
 834   inline void reset_evacuation_should_fail();
 835 #endif // !PRODUCT
 836 
 837   // ("Weak") Reference processing support.
 838   //
 839   // G1 has 2 instances of the reference processor class. One
 840   // (_ref_processor_cm) handles reference object discovery
 841   // and subsequent processing during concurrent marking cycles.
 842   //
 843   // The other (_ref_processor_stw) handles reference object
 844   // discovery and processing during full GCs and incremental
 845   // evacuation pauses.
 846   //
 847   // During an incremental pause, reference discovery will be
 848   // temporarily disabled for _ref_processor_cm and will be
 849   // enabled for _ref_processor_stw. At the end of the evacuation
 850   // pause references discovered by _ref_processor_stw will be
 851   // processed and discovery will be disabled. The previous
 852   // setting for reference object discovery for _ref_processor_cm
 853   // will be re-instated.
 854   //
 855   // At the start of marking:
 856   //  * Discovery by the CM ref processor is verified to be inactive
 857   //    and its discovered lists are empty.
 858   //  * Discovery by the CM ref processor is then enabled.
 859   //
 860   // At the end of marking:
 861   //  * Any references on the CM ref processor's discovered
 862   //    lists are processed (possibly MT).
 863   //
 864   // At the start of full GC we:
 865   //  * Disable discovery by the CM ref processor and
 866   //    empty CM ref processor's discovered lists
 867   //    (without processing any entries).
 868   //  * Verify that the STW ref processor is inactive and its
 869   //    discovered lists are empty.
 870   //  * Temporarily set STW ref processor discovery as single threaded.
 871   //  * Temporarily clear the STW ref processor's _is_alive_non_header
 872   //    field.
 873   //  * Finally enable discovery by the STW ref processor.
 874   //
 875   // The STW ref processor is used to record any discovered
 876   // references during the full GC.
 877   //
 878   // At the end of a full GC we:
 879   //  * Enqueue any reference objects discovered by the STW ref processor
 880   //    that have non-live referents. This has the side-effect of
 881   //    making the STW ref processor inactive by disabling discovery.
 882   //  * Verify that the CM ref processor is still inactive
 883   //    and no references have been placed on its discovered
 884   //    lists (also checked as a precondition during initial marking).
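       //
       // Illustrative sketch of the evacuation-pause part of the protocol above
       // (a simplification; the calls and their ordering follow the description,
       // not necessarily the exact pause code):
       //
       //   ref_processor_cm()->disable_discovery();   // CM discovery off
       //   ref_processor_stw()->enable_discovery();   // STW discovery on
       //   // ... evacuate the collection set, discovering references ...
       //   process_discovered_references(per_thread_states);
       //   enqueue_discovered_references(per_thread_states);
       //   ref_processor_stw()->disable_discovery();
       //   // CM discovery is then restored to its previous setting.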
 885 
 886   // The (stw) reference processor...
 887   ReferenceProcessor* _ref_processor_stw;
 888 
 889   STWGCTimer* _gc_timer_stw;
 890   ConcurrentGCTimer* _gc_timer_cm;
 891 
 892   G1OldTracer* _gc_tracer_cm;
 893   G1NewTracer* _gc_tracer_stw;
 894 
 895   // During reference object discovery, the _is_alive_non_header
 896   // closure (if non-null) is applied to the referent object to
 897   // determine whether the referent is live. If so then the
 898   // reference object does not need to be 'discovered' and can
 899   // be treated as a regular oop. This has the benefit of reducing
 900   // the number of 'discovered' reference objects that need to
 901   // be processed.
 902   //
 903   // Instance of the is_alive closure for embedding into the
 904   // STW reference processor as the _is_alive_non_header field.
 905   // Supplying a value for the _is_alive_non_header field is
 906   // optional but doing so prevents unnecessary additions to
 907   // the discovered lists during reference discovery.
 908   G1STWIsAliveClosure _is_alive_closure_stw;
 909 
 910   // The (concurrent marking) reference processor...
 911   ReferenceProcessor* _ref_processor_cm;
 912 
 913   // Instance of the concurrent mark is_alive closure for embedding
 914   // into the Concurrent Marking reference processor as the
 915   // _is_alive_non_header field. Supplying a value for the
 916   // _is_alive_non_header field is optional but doing so prevents
 917   // unnecessary additions to the discovered lists during reference
 918   // discovery.
 919   G1CMIsAliveClosure _is_alive_closure_cm;
 920 
 921   // Cache used by G1CollectedHeap::start_cset_region_for_worker().
 922   HeapRegion** _worker_cset_start_region;
 923 
 924   // Time stamp to validate the regions recorded in the cache
 925   // used by G1CollectedHeap::start_cset_region_for_worker().
 926   // The heap region entry for a given worker is valid iff
 927   // the associated time stamp value matches the current value
 928   // of G1CollectedHeap::_gc_time_stamp.
 929   uint* _worker_cset_start_region_time_stamp;
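
       // Illustrative sketch of the validity check described above (worker_i and
       // result are hypothetical locals; the real code is in
       // start_cset_region_for_worker()):
       //
       //   if (_worker_cset_start_region_time_stamp[worker_i] == _gc_time_stamp) {
       //     return _worker_cset_start_region[worker_i];          // cache hit
       //   }
       //   // ... otherwise compute a start region, then refresh the cache:
       //   _worker_cset_start_region[worker_i] = result;
       //   _worker_cset_start_region_time_stamp[worker_i] = _gc_time_stamp;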
 930 
 931   volatile bool _free_regions_coming;
 932 
 933 public:
 934 
 935   void set_refine_cte_cl_concurrency(bool concurrent);
 936 
 937   RefToScanQueue *task_queue(uint i) const;
 938 
 939   uint num_task_queues() const;
 940 
 941   // A set of cards where updates happened during the GC
 942   DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
 943 
 944   // Create a G1CollectedHeap with the specified policy.
 945   // Must call the initialize method afterwards.
 946   // May not return if something goes wrong.
 947   G1CollectedHeap(G1CollectorPolicy* policy);
 948 
 949   // Initialize the G1CollectedHeap to have the initial and
 950   // maximum sizes and remembered and barrier sets
 951   // specified by the policy object.
 952   jint initialize();
 953 
 954   virtual void stop();
 955 
 956   // Return the (conservative) maximum heap alignment for any G1 heap
 957   static size_t conservative_max_heap_alignment();
 958 
 959   // Does operations required after initialization has been done.
 960   void post_initialize();
 961 
 962   // Initialize weak reference processing.
 963   void ref_processing_init();
 964 
 965   virtual Name kind() const {
 966     return CollectedHeap::G1CollectedHeap;
 967   }
 968 
 969   virtual const char* name() const {
 970     return "G1";
 971   }
 972 
 973   const G1CollectorState* collector_state() const { return &_collector_state; }
 974   G1CollectorState* collector_state() { return &_collector_state; }
 975 
 976   // The current policy object for the collector.
 977   G1CollectorPolicy* g1_policy() const { return _g1_policy; }
 978 
 979   virtual CollectorPolicy* collector_policy() const;
 980 
 981   // Adaptive size policy.  No such thing for g1.
 982   virtual AdaptiveSizePolicy* size_policy() { return NULL; }
 983 
 984   // The rem set and barrier set.
 985   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
 986 
 987   void scrub_rem_set(BitMap* region_bm, BitMap* card_bm);
 988 
 989   unsigned get_gc_time_stamp() {
 990     return _gc_time_stamp;
 991   }
 992 
 993   inline void reset_gc_time_stamp();
 994 
 995   void check_gc_time_stamps() PRODUCT_RETURN;
 996 
 997   inline void increment_gc_time_stamp();
 998 
 999   // Reset the given region's GC timestamp. If it is a starts humongous
1000   // region, also reset the GC timestamps of its corresponding
1001   // continues humongous regions.
1002   void reset_gc_time_stamps(HeapRegion* hr);
1003 
1004   // Apply the given closure on all cards in the Hot Card Cache, emptying it.
1005   void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);
1006 
1007   // Apply the given closure on all cards in the Dirty Card Queue Set, emptying it.
1008   void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);
1009 
1010   // The shared block offset table array.
1011   G1BlockOffsetTable* bot() const { return _bot; }
1012 
1013   // Reference Processing accessors
1014 
1015   // The STW reference processor....
1016   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1017 
1018   G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }
1019 
1020   // The Concurrent Marking reference processor...
1021   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1022 
1023   ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
1024   G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
1025 
1026   virtual size_t capacity() const;
1027   virtual size_t used() const;
1028   // This should be called when we're not holding the heap lock. The
1029   // result might be a bit inaccurate.
1030   size_t used_unlocked() const;
1031   size_t recalculate_used() const;
1032 
1033   // These virtual functions do the actual allocation.
1034   // Some heaps may offer a contiguous region for shared non-blocking
1035   // allocation, via inlined code (by exporting the address of the top and
1036   // end fields defining the extent of the contiguous allocation region).
1037   // But G1CollectedHeap doesn't yet support this.
1038 
1039   virtual bool is_maximal_no_gc() const {
1040     return _hrm.available() == 0;
1041   }
1042 
1043   // The current number of regions in the heap.
1044   uint num_regions() const { return _hrm.length(); }
1045 
1046   // The max number of regions in the heap.
1047   uint max_regions() const { return _hrm.max_length(); }
1048 
1049   // The number of regions that are completely free.
1050   uint num_free_regions() const { return _hrm.num_free_regions(); }
1051 
1052   MemoryUsage get_auxiliary_data_memory_usage() const {
1053     return _hrm.get_auxiliary_data_memory_usage();
1054   }
1055 
1056   // The number of regions that are not completely free.
1057   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1058 
1059   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1060   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1061   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
1062   void verify_dirty_young_regions() PRODUCT_RETURN;
1063 
1064 #ifndef PRODUCT
1065   // Make sure that the given bitmap has no marked objects in the
1066   // range [from,limit). If it does, print an error message and return
1067   // false. Otherwise, just return true. bitmap_name should be "prev"
1068   // or "next".
1069   bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
1070                                 HeapWord* from, HeapWord* limit);
1071 
1072   // Verify that the prev / next bitmap range [tams,end) for the given
1073   // region has no marks. Return true if all is well, false if errors
1074   // are detected.
1075   bool verify_bitmaps(const char* caller, HeapRegion* hr);
1076 #endif // PRODUCT
1077 
1078   // If G1VerifyBitmaps is set, verify that the marking bitmaps for
1079   // the given region do not have any spurious marks. If errors are
1080   // detected, print appropriate error messages and crash.
1081   void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;
1082 
1083   // If G1VerifyBitmaps is set, verify that the marking bitmaps do not
1084   // have any spurious marks. If errors are detected, print
1085   // appropriate error messages and crash.
1086   void check_bitmaps(const char* caller) PRODUCT_RETURN;
1087 
1088   // Do sanity check on the contents of the in-cset fast test table.
1089   bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
1090 
1091   // verify_region_sets() performs verification over the region
1092   // lists. It will be compiled in the product code to be used when
1093   // necessary (i.e., during heap verification).
1094   void verify_region_sets();
1095 
1096   // verify_region_sets_optional() is planted in the code for
1097   // list verification in non-product builds (and it can be enabled in
1098   // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
1099 #if HEAP_REGION_SET_FORCE_VERIFY
1100   void verify_region_sets_optional() {
1101     verify_region_sets();
1102   }
1103 #else // HEAP_REGION_SET_FORCE_VERIFY
1104   void verify_region_sets_optional() { }
1105 #endif // HEAP_REGION_SET_FORCE_VERIFY
1106 
1107 #ifdef ASSERT
1108   bool is_on_master_free_list(HeapRegion* hr) {
1109     return _hrm.is_free(hr);
1110   }
1111 #endif // ASSERT
1112 
1113   // Wrapper for the region list operations that can be called from
1114   // methods outside this class.
1115 
1116   void secondary_free_list_add(FreeRegionList* list) {
1117     _secondary_free_list.add_ordered(list);
1118   }
1119 
1120   void append_secondary_free_list() {
1121     _hrm.insert_list_into_free_list(&_secondary_free_list);
1122   }
1123 
1124   void append_secondary_free_list_if_not_empty_with_lock() {
1125     // If the secondary free list looks empty there's no reason to
1126     // take the lock and then try to append it.
1127     if (!_secondary_free_list.is_empty()) {
1128       MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1129       append_secondary_free_list();
1130     }
1131   }
1132 
1133   inline void old_set_add(HeapRegion* hr);
1134   inline void old_set_remove(HeapRegion* hr);
1135 
1136   size_t non_young_capacity_bytes() {
1137     return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
1138   }
1139 
1140   void set_free_regions_coming();
1141   void reset_free_regions_coming();
1142   bool free_regions_coming() { return _free_regions_coming; }
1143   void wait_while_free_regions_coming();
1144 
1145   // Determine whether the given region is one that we are using as an
1146   // old GC alloc region.
1147   bool is_old_gc_alloc_region(HeapRegion* hr);
1148 
1149   // Perform a collection of the heap; intended for use in implementing
1150   // "System.gc".  This probably implies as full a collection as the
1151   // "CollectedHeap" supports.
1152   virtual void collect(GCCause::Cause cause);
1153 
1154   virtual bool copy_allocation_context_stats(const jint* contexts,
1155                                              jlong* totals,
1156                                              jbyte* accuracy,
1157                                              jint len);
1158 
1159   // True iff an evacuation has failed in the most-recent collection.
1160   bool evacuation_failed() { return _evacuation_failed; }
1161 
1162   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
1163   void prepend_to_freelist(FreeRegionList* list);
1164   void decrement_summary_bytes(size_t bytes);
1165 
1166   virtual bool is_in(const void* p) const;
1167 #ifdef ASSERT
1168   // Returns whether p is in one of the available areas of the heap. Slow but
1169   // extensive version.
1170   bool is_in_exact(const void* p) const;
1171 #endif
1172 
1173   // Return "TRUE" iff the given object address is within the collection
1174   // set. Slow implementation.
1175   bool obj_in_cs(oop obj);
1176 
1177   inline bool is_in_cset(const HeapRegion *hr);
1178   inline bool is_in_cset(oop obj);
1179 
1180   inline bool is_in_cset_or_humongous(const oop obj);
1181 
1182  private:
1183   // This array is used for a quick test on whether a reference points into
1184   // the collection set or not. Each of the array's elements denotes whether the
1185   // corresponding region is in the collection set or not.
1186   G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
1187 
1188  public:
1189 
1190   inline InCSetState in_cset_state(const oop obj);
1191 
1192   // Return "TRUE" iff the given object address is in the reserved
1193   // region of g1.
1194   bool is_in_g1_reserved(const void* p) const {
1195     return _hrm.reserved().contains(p);
1196   }
1197 
1198   // Returns a MemRegion that corresponds to the space that has been
1199   // reserved for the heap
1200   MemRegion g1_reserved() const {
1201     return _hrm.reserved();
1202   }
1203 
1204   virtual bool is_in_closed_subset(const void* p) const;
1205 
1206   G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
1207     return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());
1208   }
1209 
1210   // This resets the card table to all zeros.  It is used after
1211   // a collection pause which used the card table to claim cards.
1212   void cleanUpCardTable();
1213 
1214   // Iteration functions.
1215 
1216   // Iterate over all objects, calling "cl.do_object" on each.
1217   virtual void object_iterate(ObjectClosure* cl);
1218 
1219   virtual void safe_object_iterate(ObjectClosure* cl) {
1220     object_iterate(cl);
1221   }
1222 
1223   // Iterate over heap regions, in address order, terminating the
1224   // iteration early if the "doHeapRegion" method returns "true".
1225   void heap_region_iterate(HeapRegionClosure* blk) const;
1226 
1227   // Return the region with the given index. It assumes the index is valid.
1228   inline HeapRegion* region_at(uint index) const;
1229 
1230   // Return the next region (by index) that is part of the same
1231   // humongous object that hr is part of.
1232   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1233 
1234   // Calculate the region index of the given address. Given address must be
1235   // within the heap.
1236   inline uint addr_to_region(HeapWord* addr) const;
1237 
1238   inline HeapWord* bottom_addr_for_region(uint index) const;
1239 
1240   // Iterate over the heap regions in parallel. Assumes that this will be called
1241   // in parallel by ParallelGCThreads worker threads with distinct worker ids
1242   // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
1243   // to each of the regions, by attempting to claim the region using the
1244   // HeapRegionClaimer and, if successful, applying the closure to the claimed
1245   // region. The concurrent argument should be set to true if iteration is
1246   // performed concurrently, during which no assumptions are made for consistent
1247   // attributes of the heap regions (as they might be modified while iterating).
1248   void heap_region_par_iterate(HeapRegionClosure* cl,
1249                                uint worker_id,
1250                                HeapRegionClaimer* hrclaimer,
1251                                bool concurrent = false) const;
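       //
       // A minimal usage sketch (illustrative only; "ScrubRegionClosure" and the
       // enclosing worker task are hypothetical, and the claimer is assumed to be
       // sized with the number of active workers):
       //
       //   HeapRegionClaimer claimer(workers()->active_workers());
       //   // ...then, in each worker thread, with that thread's worker_id:
       //   ScrubRegionClosure cl;
       //   heap_region_par_iterate(&cl, worker_id, &claimer);
       //
       // Each region is claimed by exactly one worker, so the closure is applied
       // to every region exactly once across all workers.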
1252 
1253   // Clear the cached cset start regions and (more importantly)
1254   // the time stamps. Called when we reset the GC time stamp.
1255   void clear_cset_start_regions();
1256 
1257   // Given the id of a worker, obtain or calculate a suitable
1258   // starting region for iterating over the current collection set.
1259   HeapRegion* start_cset_region_for_worker(uint worker_i);
1260 
1261   // Iterate over the regions (if any) in the current collection set.
1262   void collection_set_iterate(HeapRegionClosure* blk);
1263 
1264   // As above but starting from region r
1265   void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1266 
1267   HeapRegion* next_compaction_region(const HeapRegion* from) const;
1268 
1269   // Returns the HeapRegion that contains addr. addr must not be NULL.
1270   template <class T>
1271   inline HeapRegion* heap_region_containing(const T addr) const;
1272 
1273   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1274   // each address in the (reserved) heap is a member of exactly
1275   // one block.  The defining characteristic of a block is that it is
1276   // possible to find its size, and thus to progress forward to the next
1277   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1278   // represent Java objects, or they might be free blocks in a
1279   // free-list-based heap (or subheap), as long as the two kinds are
1280   // distinguishable and the size of each is determinable.
1281 
1282   // Returns the address of the start of the "block" that contains the
1283   // address "addr".  We say "block" instead of "object" since some heaps
1284   // may not pack objects densely; a block may either be an object or a
1285   // non-object.
1286   virtual HeapWord* block_start(const void* addr) const;
1287 
1288   // Requires "addr" to be the start of a block, and returns its size.
1289   // "addr + size" is required to be the start of a new block, or the end
1290   // of the active area of the heap.
1291   virtual size_t block_size(const HeapWord* addr) const;
1292 
1293   // Requires "addr" to be the start of a block, and returns "TRUE" iff
1294   // the block is an object.
1295   virtual bool block_is_obj(const HeapWord* addr) const;
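       //
       // A minimal sketch of how the three functions above fit together when
       // walking a parseable range of the heap (illustrative; "bottom" and "top"
       // stand for the bounds of that range, e.g. a region's bottom and top):
       //
       //   HeapWord* cur = bottom;
       //   while (cur < top) {
       //     if (block_is_obj(cur)) {
       //       // "cur" is the start of an object
       //     }
       //     cur += block_size(cur);   // advance by the block's size in words
       //   }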
1296 
1297   // Section on thread-local allocation buffers (TLABs)
1298   // See CollectedHeap for semantics.
1299 
1300   bool supports_tlab_allocation() const;
1301   size_t tlab_capacity(Thread* ignored) const;
1302   size_t tlab_used(Thread* ignored) const;
1303   size_t max_tlab_size() const;
1304   size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1305 
1306   // Can a compiler initialize a new object without store barriers?
1307   // This permission only extends from the creation of a new object
1308   // via a TLAB up to the first subsequent safepoint. If such permission
1309   // is granted for this heap type, the compiler promises to call
1310   // defer_store_barrier() below on any slow path allocation of
1311   // a new object for which such initializing store barriers will
1312   // have been elided. G1, like CMS, allows this, but should be
1313   // ready to provide a compensating write barrier as necessary
1314   // if that storage came out of a non-young region. The efficiency
1315   // of this implementation depends crucially on being able to
1316   // answer very efficiently in constant time whether a piece of
1317   // storage in the heap comes from a young region or not.
1318   // See ReduceInitialCardMarks.
1319   virtual bool can_elide_tlab_store_barriers() const {
1320     return true;
1321   }
1322 
1323   virtual bool card_mark_must_follow_store() const {
1324     return true;
1325   }
1326 
1327   inline bool is_in_young(const oop obj);
1328 
1329   virtual bool is_scavengable(const void* addr);
1330 
1331   // We don't need barriers for initializing stores to objects
1332   // in the young gen: for the SATB pre-barrier, there is no
1333   // pre-value that needs to be remembered; for the remembered-set
1334   // update logging post-barrier, we don't maintain remembered set
1335   // information for young gen objects.
1336   virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
1337 
1338   // Returns "true" iff the given word_size is "humongous" (> half a region).
1339   static bool is_humongous(size_t word_size) {
1340     // Note this has to be strictly greater-than as the TLABs
1341     // are capped at the humongous threshold and we want to
1342     // ensure that we don't try to allocate a TLAB as
1343     // humongous and that we don't allocate a humongous
1344     // object in a TLAB.
1345     return word_size > _humongous_object_threshold_in_words;
1346   }
1347 
1348   // Returns the humongous threshold for a specific region size
1349   static size_t humongous_threshold_for(size_t region_size) {
1350     return (region_size / 2);
1351   }
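       //
       // Worked example (assuming a 1 MB region and 8-byte HeapWords): the region
       // holds 131072 words, so humongous_threshold_for() yields 65536 words; a
       // 65536-word allocation still fits a regular region or TLAB, while a
       // 65537-word allocation is humongous, because the test in is_humongous()
       // is strictly greater-than.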
1352 
1353   // Returns the number of regions the humongous object of the given word size
1354   // requires.
1355   static size_t humongous_obj_size_in_regions(size_t word_size);
1356 
1357   // Return the maximum heap capacity.
1358   virtual size_t max_capacity() const;
1359 
1360   virtual jlong millis_since_last_gc();
1361 
1362 
1363   // Convenience function to be used in situations where the heap type can be
1364   // asserted to be this type.
1365   static G1CollectedHeap* heap();
1366 
1367   void set_region_short_lived_locked(HeapRegion* hr);
1368   // add appropriate methods for any other surv rate groups
1369 
1370   YoungList* young_list() const { return _young_list; }
1371 
1372   uint old_regions_count() const { return _old_set.length(); }
1373 
1374   uint humongous_regions_count() const { return _humongous_set.length(); }
1375 
1376   // debugging
1377   bool check_young_list_well_formed() {
1378     return _young_list->check_list_well_formed();
1379   }
1380 
1381   bool check_young_list_empty(bool check_heap,
1382                               bool check_sample = true);
1383 
1384   // *** Support for concurrent marking.  It's not clear to me that so
1385   // many of these need to be public.
1386 
1387   // The functions below are helper functions that a subclass of
1388   // "CollectedHeap" can use in the implementation of its virtual
1389   // functions.
1390   // This performs a concurrent marking of the live objects in a
1391   // bitmap off to the side.
1392   void doConcurrentMark();
1393 
1394   bool isMarkedPrev(oop obj) const;
1395   bool isMarkedNext(oop obj) const;
1396 
1397   // Determine if an object is dead, given the object and also
1398   // the region to which the object belongs. An object is dead
1399   // iff a) it was not allocated since the last mark, b) it
1400   // is not marked, and c) it is not in an archive region.
1401   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1402     return
1403       !hr->obj_allocated_since_prev_marking(obj) &&
1404       !isMarkedPrev(obj) &&
1405       !hr->is_archive();
1406   }
1407 
1408   // This function returns true when an object was allocated before the
1409   // current ("next") marking started, hasn't yet been marked during this
1410   // marking, and is not in an archive region.
1411   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1412     return
1413       !hr->obj_allocated_since_next_marking(obj) &&
1414       !isMarkedNext(obj) &&
1415       !hr->is_archive();
1416   }
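       //
       // In other words (a sketch of the symmetry, not new behavior): is_obj_dead()
       // asks the question against the completed "prev" marking, while is_obj_ill()
       // asks it against the in-progress "next" marking, e.g.:
       //
       //   if (is_obj_dead(obj, hr)) { ... } // garbage w.r.t. the previous marking
       //   if (is_obj_ill(obj, hr))  { ... } // not yet proven live in this marking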
1417 
1418   // Determine if an object is dead, given only the object itself.
1419   // This will find the region to which the object belongs and
1420   // then call the region version of the same function.
1421 
1422   // Note: if obj is NULL it is not considered dead.
1423 
1424   inline bool is_obj_dead(const oop obj) const;
1425 
1426   inline bool is_obj_ill(const oop obj) const;
1427 
1428   bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
1429   HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
1430   bool is_marked(oop obj, VerifyOption vo);
1431   const char* top_at_mark_start_str(VerifyOption vo);
1432 
1433   ConcurrentMark* concurrent_mark() const { return _cm; }
1434 
1435   // Refinement
1436 
1437   ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1438 
1439   // The dirty cards region list is used to record a subset of regions
1440   // whose cards need clearing. The list is populated during the
1441   // remembered set scanning and drained during the card table
1442   // cleanup. Although the methods are reentrant, population/draining
1443   // phases must not overlap. For synchronization purposes the last
1444   // element on the list points to itself.
1445   HeapRegion* _dirty_cards_region_list;
1446   void push_dirty_cards_region(HeapRegion* hr);
1447   HeapRegion* pop_dirty_cards_region();
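       //
       // Illustrative sketch of the intended two-phase pattern (assuming that
       // pop_dirty_cards_region() returns NULL once the list has been drained):
       //
       //   // Population, during remembered set scanning:
       //   push_dirty_cards_region(hr);
       //
       //   // Draining, during card table cleanup:
       //   while (HeapRegion* r = pop_dirty_cards_region()) {
       //     // clear the cards spanned by r
       //   }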
1448 
1449   // Optimized nmethod scanning support routines
1450 
1451   // Register the given nmethod with the G1 heap.
1452   virtual void register_nmethod(nmethod* nm);
1453 
1454   // Unregister the given nmethod from the G1 heap.
1455   virtual void unregister_nmethod(nmethod* nm);
1456 
1457   // Free up superfluous code root memory.
1458   void purge_code_root_memory();
1459 
1460   // Rebuild the strong code root lists for each region
1461   // after a full GC.
1462   void rebuild_strong_code_roots();
1463 
1464   // Delete entries for dead interned strings and clean up unreferenced symbols
1465   // in symbol table, possibly in parallel.
1466   void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
1467 
1468   // Parallel phase of unloading/cleaning after G1 concurrent mark.
1469   void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred);
1470 
1471   // Redirty logged cards in the refinement queue.
1472   void redirty_logged_cards();
1473   // Verification
1474 
1475   // Perform any cleanup actions necessary before allowing a verification.
1476   virtual void prepare_for_verify();
1477 
1478   // Perform verification.
1479 
1480   // vo == UsePrevMarking -> use "prev" marking information,
1481   // vo == UseNextMarking -> use "next" marking information
1482   // vo == UseMarkWord    -> use the mark word in the object header
1483   //
1484   // NOTE: Only the "prev" marking information is guaranteed to be
1485   // consistent most of the time, so most calls to this should use
1486   // vo == UsePrevMarking.
1487   // Currently, there is only one case where this is called with
1488   // vo == UseNextMarking, which is to verify the "next" marking
1489   // information at the end of remark.
1490   // Currently there is only one place where this is called with
1491   // vo == UseMarkWord, which is to verify the marking during a
1492   // full GC.
1493   void verify(VerifyOption vo);
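       //
       // Typical call for a routine verification pass (a sketch; the exact enum
       // constant name for the "prev" option is assumed here):
       //
       //   verify(VerifyOption_G1UsePrevMarking);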
1494 
1495   // The methods below are here for convenience and dispatch the
1496   // appropriate method depending on value of the given VerifyOption
1497   // parameter. The values for that parameter, and their meanings,
1498   // are the same as those above.
1499 
1500   bool is_obj_dead_cond(const oop obj,
1501                         const HeapRegion* hr,
1502                         const VerifyOption vo) const;
1503 
1504   bool is_obj_dead_cond(const oop obj,
1505                         const VerifyOption vo) const;
1506 
1507   G1HeapSummary create_g1_heap_summary();
1508   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1509 
1510   // Printing
1511 
1512   virtual void print_on(outputStream* st) const;
1513   virtual void print_extended_on(outputStream* st) const;
1514   virtual void print_on_error(outputStream* st) const;
1515 
1516   virtual void print_gc_threads_on(outputStream* st) const;
1517   virtual void gc_threads_do(ThreadClosure* tc) const;
1518 
1519   // Override
1520   void print_tracing_info() const;
1521 
1522   // The following two methods are helpful for debugging RSet issues.
1523   void print_cset_rsets() PRODUCT_RETURN;
1524   void print_all_rsets() PRODUCT_RETURN;
1525 
1526 public:
1527   size_t pending_card_num();
1528 
1529 protected:
1530   size_t _max_heap_capacity;
1531 };
1532 
1533 class G1ParEvacuateFollowersClosure : public VoidClosure {
1534 private:
1535   double _start_term;
1536   double _term_time;
1537   size_t _term_attempts;
1538 
1539   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1540   void end_term_time() { _term_time += os::elapsedTime() - _start_term; }
1541 protected:
1542   G1CollectedHeap*              _g1h;
1543   G1ParScanThreadState*         _par_scan_state;
1544   RefToScanQueueSet*            _queues;
1545   ParallelTaskTerminator*       _terminator;
1546 
1547   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1548   RefToScanQueueSet*      queues()         { return _queues; }
1549   ParallelTaskTerminator* terminator()     { return _terminator; }
1550 
1551 public:
1552   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1553                                 G1ParScanThreadState* par_scan_state,
1554                                 RefToScanQueueSet* queues,
1555                                 ParallelTaskTerminator* terminator)
1556     : _g1h(g1h), _par_scan_state(par_scan_state),
1557       _queues(queues), _terminator(terminator),
1558       _start_term(0.0), _term_time(0.0), _term_attempts(0) {}
1559 
1560   void do_void();
1561 
1562   double term_time() const { return _term_time; }
1563   size_t term_attempts() const { return _term_attempts; }
1564 
1565 private:
1566   inline bool offer_termination();
1567 };
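     //
     // Illustrative sketch of how a per-worker evacuation task might drive this
     // closure (assumes "g1h", "pss", "queues" and "terminator" have been set up
     // by the enclosing task):
     //
     //   G1ParEvacuateFollowersClosure cl(g1h, pss, queues, terminator);
     //   cl.do_void();                        // drain queues, steal work, terminate
     //   double seconds_terminating = cl.term_time();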
1568 
1569 #endif // SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP