1 /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1BiasedArray.hpp"
  30 #include "gc/g1/g1CardTable.hpp"
  31 #include "gc/g1/g1CollectionSet.hpp"
  32 #include "gc/g1/g1CollectorState.hpp"
  33 #include "gc/g1/g1ConcurrentMark.hpp"
  34 #include "gc/g1/g1DirtyCardQueue.hpp"
  35 #include "gc/g1/g1EdenRegions.hpp"
  36 #include "gc/g1/g1EvacFailure.hpp"
  37 #include "gc/g1/g1EvacStats.hpp"
  38 #include "gc/g1/g1EvacuationInfo.hpp"
  39 #include "gc/g1/g1GCPhaseTimes.hpp"
  40 #include "gc/g1/g1HeapTransition.hpp"
  41 #include "gc/g1/g1HeapVerifier.hpp"
  42 #include "gc/g1/g1HRPrinter.hpp"
  43 #include "gc/g1/g1InCSetState.hpp"
  44 #include "gc/g1/g1MonitoringSupport.hpp"
  45 #include "gc/g1/g1SurvivorRegions.hpp"
  46 #include "gc/g1/g1YCTypes.hpp"
  47 #include "gc/g1/heapRegionManager.hpp"
  48 #include "gc/g1/heapRegionSet.hpp"
  49 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
  50 #include "gc/shared/barrierSet.hpp"
  51 #include "gc/shared/collectedHeap.hpp"
  52 #include "gc/shared/gcHeapSummary.hpp"
  53 #include "gc/shared/plab.hpp"
  54 #include "gc/shared/preservedMarks.hpp"
  55 #include "gc/shared/softRefPolicy.hpp"
  56 #include "memory/memRegion.hpp"
  57 #include "utilities/stack.hpp"
  58 
  59 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  60 // It uses the "Garbage First" heap organization and algorithm, which
  61 // may combine concurrent marking with parallel, incremental compaction of
  62 // heap subsets that will yield large amounts of garbage.
  63 
  64 // Forward declarations
  65 class HeapRegion;
  66 class GenerationSpec;
  67 class G1ParScanThreadState;
  68 class G1ParScanThreadStateSet;
  69 class G1ParScanThreadState;
  70 class MemoryPool;
  71 class MemoryManager;
  72 class ObjectClosure;
  73 class SpaceClosure;
  74 class CompactibleSpaceClosure;
  75 class Space;
  76 class G1CollectionSet;
  77 class G1CollectorPolicy;
  78 class G1Policy;
  79 class G1HotCardCache;
  80 class G1RemSet;
  81 class G1YoungRemSetSamplingThread;
  82 class HeapRegionRemSetIterator;
  83 class G1ConcurrentMark;
  84 class G1ConcurrentMarkThread;
  85 class G1ConcurrentRefine;
  86 class GenerationCounters;
  87 class STWGCTimer;
  88 class G1NewTracer;
  89 class EvacuationFailedInfo;
  90 class nmethod;
  91 class WorkGang;
  92 class G1Allocator;
  93 class G1ArchiveAllocator;
  94 class G1FullGCScope;
  95 class G1HeapVerifier;
  96 class G1HeapSizingPolicy;
  97 class G1HeapSummary;
  98 class G1EvacSummary;
  99 
 100 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
 101 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
 102 
 103 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 104 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
 105 
 106 // The G1 STW is alive closure.
 107 // An instance is embedded into the G1CH and used as the
 108 // (optional) _is_alive_non_header closure in the STW
 109 // reference processor. It is also extensively used during
 110 // reference processing during STW evacuation pauses.
 111 class G1STWIsAliveClosure : public BoolObjectClosure {
 112   G1CollectedHeap* _g1h;
 113 public:
 114   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 115   bool do_object_b(oop p);
 116 };
 117 
 118 class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
 119   G1CollectedHeap* _g1h;
 120 public:
 121   G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 122   bool do_object_b(oop p);
 123 };
 124 
 125 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 126  private:
 127   void reset_from_card_cache(uint start_idx, size_t num_regions);
 128  public:
 129   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 130 };
 131 
 132 class G1CollectedHeap : public CollectedHeap {
 133   friend class G1FreeCollectionSetTask;
 134   friend class VM_CollectForMetadataAllocation;
 135   friend class VM_G1CollectForAllocation;
 136   friend class VM_G1CollectFull;
 137   friend class VMStructs;
 138   friend class MutatorAllocRegion;
 139   friend class G1FullCollector;
 140   friend class G1GCAllocRegion;
 141   friend class G1HeapVerifier;
 142 
 143   // Closures used in implementation.
 144   friend class G1ParScanThreadState;
 145   friend class G1ParScanThreadStateSet;
 146   friend class G1EvacuateRegionsTask;
 147   friend class G1PLABAllocator;
 148 
 149   // Other related classes.
 150   friend class HeapRegionClaimer;
 151 
 152   // Testing classes.
 153   friend class G1CheckCSetFastTableClosure;
 154 
 155 private:
 156   G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
 157 
 158   WorkGang* _workers;
 159   G1CollectorPolicy* _collector_policy;
 160   G1CardTable* _card_table;
 161 
 162   SoftRefPolicy      _soft_ref_policy;
 163 
 164   static size_t _humongous_object_threshold_in_words;
 165 
 166   // These sets keep track of old, archive and humongous regions respectively.
 167   HeapRegionSet _old_set;
 168   HeapRegionSet _archive_set;
 169   HeapRegionSet _humongous_set;
 170 
 171   void eagerly_reclaim_humongous_regions();
 172   // Start a new incremental collection set for the next pause.
 173   void start_new_collection_set();
 174 
 175   // The block offset table for the G1 heap.
 176   G1BlockOffsetTable* _bot;
 177 
 178   // Tears down the region sets / lists so that they are empty and the
 179   // regions on the heap do not belong to a region set / list. The
 180   // only exception is the humongous set which we leave unaltered. If
 181   // free_list_only is true, it will only tear down the master free
 182   // list. It is called before a Full GC (free_list_only == false) or
 183   // before heap shrinking (free_list_only == true).
 184   void tear_down_region_sets(bool free_list_only);
 185 
 186   // Rebuilds the region sets / lists so that they are repopulated to
 187   // reflect the contents of the heap. The only exception is the
 188   // humongous set which was not torn down in the first place. If
 189   // free_list_only is true, it will only rebuild the master free
 190   // list. It is called after a Full GC (free_list_only == false) or
 191   // after heap shrinking (free_list_only == true).
 192   void rebuild_region_sets(bool free_list_only);
 193 
 194   // Callback for region mapping changed events.
 195   G1RegionMappingChangedListener _listener;
 196 
 197   // The sequence of all heap regions in the heap.
 198   HeapRegionManager* _hrm;
 199 
 200   // Manages all allocations with regions except humongous object allocations.
 201   G1Allocator* _allocator;
 202 
 203   // Manages all heap verification.
 204   G1HeapVerifier* _verifier;
 205 
 206   // Outside of GC pauses, the number of bytes used in all regions other
 207   // than the current allocation region(s).
 208   volatile size_t _summary_bytes_used;
 209 
 210   void increase_used(size_t bytes);
 211   void decrease_used(size_t bytes);
 212 
 213   void set_used(size_t bytes);
 214 
 215   // Class that handles archive allocation ranges.
 216   G1ArchiveAllocator* _archive_allocator;
 217 
 218   // GC allocation statistics policy for survivors.
 219   G1EvacStats _survivor_evac_stats;
 220 
 221   // GC allocation statistics policy for tenured objects.
 222   G1EvacStats _old_evac_stats;
 223 
 224   // It specifies whether we should attempt to expand the heap after a
 225   // region allocation failure. If heap expansion fails we set this to
 226   // false so that we don't re-attempt the heap expansion (it's likely
 227   // that subsequent expansion attempts will also fail if one fails).
 228   // Currently, it is only consulted during GC and it's reset at the
 229   // start of each GC.
 230   bool _expand_heap_after_alloc_failure;
 231 
 232   // Helper for monitoring and management support.
 233   G1MonitoringSupport* _g1mm;
 234 
 235   // Records whether the region at the given index is (still) a
 236   // candidate for eager reclaim.  Only valid for humongous start
 237   // regions; other regions have unspecified values.  Humongous start
 238   // regions are initialized at start of collection pause, with
 239   // candidates removed from the set as they are found reachable from
 240   // roots or the young generation.
 241   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 242    protected:
 243     bool default_value() const { return false; }
 244    public:
 245     void clear() { G1BiasedMappedArray<bool>::clear(); }
 246     void set_candidate(uint region, bool value) {
 247       set_by_index(region, value);
 248     }
 249     bool is_candidate(uint region) {
 250       return get_by_index(region);
 251     }
 252   };
 253 
 254   HumongousReclaimCandidates _humongous_reclaim_candidates;
 255   // Stores whether during humongous object registration we found candidate regions.
 256   // If not, we can skip a few steps.
 257   bool _has_humongous_reclaim_candidates;
 258 
 259   G1HRPrinter _hr_printer;
 260 
 261   // It decides whether an explicit GC should start a concurrent cycle
 262   // instead of doing a STW GC. Currently, a concurrent cycle is
 263   // explicitly started if:
 264   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
 265   // (b) cause == _g1_humongous_allocation
 266   // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
 267   // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
 268   // (e) cause == _wb_conc_mark
 269   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 270 
 271   // Return true if should upgrade to full gc after an incremental one.
 272   bool should_upgrade_to_full_gc(GCCause::Cause cause);
 273 
 274   // indicates whether we are in young or mixed GC mode
 275   G1CollectorState _collector_state;
 276 
 277   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 278   // concurrent cycles) we have started.
 279   volatile uint _old_marking_cycles_started;
 280 
 281   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 282   // concurrent cycles) we have completed.
 283   volatile uint _old_marking_cycles_completed;
 284 
 285   // This is a non-product method that is helpful for testing. It is
 286   // called at the end of a GC and artificially expands the heap by
 287   // allocating a number of dead regions. This way we can induce very
 288   // frequent marking cycles and stress the cleanup / concurrent
 289   // cleanup code more (as all the regions that will be allocated by
 290   // this method will be found dead by the marking cycle).
 291   void allocate_dummy_regions() PRODUCT_RETURN;
 292 
 293   // If the HR printer is active, dump the state of the regions in the
 294   // heap after a compaction.
 295   void print_hrm_post_compaction();
 296 
 297   // Create a memory mapper for auxiliary data structures of the given size and
 298   // translation factor.
 299   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 300                                                          size_t size,
 301                                                          size_t translation_factor);
 302 
 303   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 304 
 305   // These are macros so that, if the assert fires, we get the correct
 306   // line number, file, etc.
 307 
 308 #define heap_locking_asserts_params(_extra_message_)                          \
 309   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 310   (_extra_message_),                                                          \
 311   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 312   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 313   BOOL_TO_STR(Thread::current()->is_VM_thread())
 314 
 315 #define assert_heap_locked()                                                  \
 316   do {                                                                        \
 317     assert(Heap_lock->owned_by_self(),                                        \
 318            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 319   } while (0)
 320 
 321 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
 322   do {                                                                        \
 323     assert(Heap_lock->owned_by_self() ||                                      \
 324            (SafepointSynchronize::is_at_safepoint() &&                        \
 325              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
 326            heap_locking_asserts_params("should be holding the Heap_lock or "  \
 327                                         "should be at a safepoint"));         \
 328   } while (0)
 329 
 330 #define assert_heap_locked_and_not_at_safepoint()                             \
 331   do {                                                                        \
 332     assert(Heap_lock->owned_by_self() &&                                      \
 333                                     !SafepointSynchronize::is_at_safepoint(), \
 334           heap_locking_asserts_params("should be holding the Heap_lock and "  \
 335                                        "should not be at a safepoint"));      \
 336   } while (0)
 337 
 338 #define assert_heap_not_locked()                                              \
 339   do {                                                                        \
 340     assert(!Heap_lock->owned_by_self(),                                       \
 341         heap_locking_asserts_params("should not be holding the Heap_lock"));  \
 342   } while (0)
 343 
 344 #define assert_heap_not_locked_and_not_at_safepoint()                         \
 345   do {                                                                        \
 346     assert(!Heap_lock->owned_by_self() &&                                     \
 347                                     !SafepointSynchronize::is_at_safepoint(), \
 348       heap_locking_asserts_params("should not be holding the Heap_lock and "  \
 349                                    "should not be at a safepoint"));          \
 350   } while (0)
 351 
 352 #define assert_at_safepoint_on_vm_thread()                                    \
 353   do {                                                                        \
 354     assert_at_safepoint();                                                    \
 355     assert(Thread::current_or_null() != NULL, "no current thread");           \
 356     assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
 357   } while (0)
 358 
 359 #define assert_used_and_recalculate_used(g1h)                                 \
 360   do {                                                                        \
 361     size_t cur_used_bytes = g1h->used();                                      \
 362     size_t recal_used_bytes = g1h->recalculate_used();                        \
 363     assert(cur_used_bytes == recal_used_bytes, "Used(" SIZE_FORMAT ") is not" \
 364            " same as recalculated used(" SIZE_FORMAT ").",                    \
 365            cur_used_bytes, recal_used_bytes);                                 \
 366   } while (0)
 367 
 368   const char* young_gc_name() const;
 369 
 370   // The young region list.
 371   G1EdenRegions _eden;
 372   G1SurvivorRegions _survivor;
 373 
 374   STWGCTimer* _gc_timer_stw;
 375 
 376   G1NewTracer* _gc_tracer_stw;
 377 
 378   // The current policy object for the collector.
 379   G1Policy* _policy;
 380   G1HeapSizingPolicy* _heap_sizing_policy;
 381 
 382   G1CollectionSet _collection_set;
 383 
 384   // Try to allocate a single non-humongous HeapRegion sufficient for
 385   // an allocation of the given word_size. If do_expand is true,
 386   // attempt to expand the heap if necessary to satisfy the allocation
 387   // request. 'type' takes the type of region to be allocated. (Use constants
 388   // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
 389   HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand);
 390 
 391   // Initialize a contiguous set of free regions of length num_regions
 392   // and starting at index first so that they appear as a single
 393   // humongous region.
 394   HeapWord* humongous_obj_allocate_initialize_regions(uint first,
 395                                                       uint num_regions,
 396                                                       size_t word_size);
 397 
 398   // Attempt to allocate a humongous object of the given size. Return
 399   // NULL if unsuccessful.
 400   HeapWord* humongous_obj_allocate(size_t word_size);
 401 
 402   // The following two methods, allocate_new_tlab() and
 403   // mem_allocate(), are the two main entry points from the runtime
 404   // into the G1's allocation routines. They have the following
 405   // assumptions:
 406   //
 407   // * They should both be called outside safepoints.
 408   //
 409   // * They should both be called without holding the Heap_lock.
 410   //
 411   // * All allocation requests for new TLABs should go to
 412   //   allocate_new_tlab().
 413   //
 414   // * All non-TLAB allocation requests should go to mem_allocate().
 415   //
 416   // * If either call cannot satisfy the allocation request using the
 417   //   current allocating region, they will try to get a new one. If
 418   //   this fails, they will attempt to do an evacuation pause and
 419   //   retry the allocation.
 420   //
 421   // * If all allocation attempts fail, even after trying to schedule
 422   //   an evacuation pause, allocate_new_tlab() will return NULL,
 423   //   whereas mem_allocate() will attempt a heap expansion and/or
 424   //   schedule a Full GC.
 425   //
 426   // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
 427   //   should never be called with word_size being humongous. All
 428   //   humongous allocation requests should go to mem_allocate() which
 429   //   will satisfy them with a special path.
 430 
 431   virtual HeapWord* allocate_new_tlab(size_t min_size,
 432                                       size_t requested_size,
 433                                       size_t* actual_size);
 434 
 435   virtual HeapWord* mem_allocate(size_t word_size,
 436                                  bool*  gc_overhead_limit_was_exceeded);
 437 
 438   // First-level mutator allocation attempt: try to allocate out of
 439   // the mutator alloc region without taking the Heap_lock. This
 440   // should only be used for non-humongous allocations.
 441   inline HeapWord* attempt_allocation(size_t min_word_size,
 442                                       size_t desired_word_size,
 443                                       size_t* actual_word_size);
 444 
 445   // Second-level mutator allocation attempt: take the Heap_lock and
 446   // retry the allocation attempt, potentially scheduling a GC
 447   // pause. This should only be used for non-humongous allocations.
 448   HeapWord* attempt_allocation_slow(size_t word_size);
 449 
 450   // Takes the Heap_lock and attempts a humongous allocation. It can
 451   // potentially schedule a GC pause.
 452   HeapWord* attempt_allocation_humongous(size_t word_size);
 453 
 454   // Allocation attempt that should be called during safepoints (e.g.,
 455   // at the end of a successful GC). expect_null_mutator_alloc_region
 456   // specifies whether the mutator alloc region is expected to be NULL
 457   // or not.
 458   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 459                                             bool expect_null_mutator_alloc_region);
 460 
 461   // These methods are the "callbacks" from the G1AllocRegion class.
 462 
 463   // For mutator alloc regions.
 464   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 465   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 466                                    size_t allocated_bytes);
 467 
 468   // For GC alloc regions.
 469   bool has_more_regions(InCSetState dest);
 470   HeapRegion* new_gc_alloc_region(size_t word_size, InCSetState dest);
 471   void retire_gc_alloc_region(HeapRegion* alloc_region,
 472                               size_t allocated_bytes, InCSetState dest);
 473 
 474   // - if explicit_gc is true, the GC is for a System.gc() etc,
 475   //   otherwise it's for a failed allocation.
 476   // - if clear_all_soft_refs is true, all soft references should be
 477   //   cleared during the GC.
 478   // - it returns false if it is unable to do the collection due to the
 479   //   GC locker being active, true otherwise.
 480   bool do_full_collection(bool explicit_gc,
 481                           bool clear_all_soft_refs);
 482 
 483   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
 484   virtual void do_full_collection(bool clear_all_soft_refs);
 485 
 486   // Callback from VM_G1CollectForAllocation operation.
 487   // This function does everything necessary/possible to satisfy a
 488   // failed allocation request (including collection, expansion, etc.)
 489   HeapWord* satisfy_failed_allocation(size_t word_size,
 490                                       bool* succeeded);
 491   // Internal helpers used during full GC to split it up to
 492   // increase readability.
 493   void abort_concurrent_cycle();
 494   void verify_before_full_collection(bool explicit_gc);
 495   void prepare_heap_for_full_collection();
 496   void prepare_heap_for_mutators();
 497   void abort_refinement();
 498   void verify_after_full_collection();
 499   void print_heap_after_full_collection(G1HeapTransition* heap_transition);
 500 
 501   // Helper method for satisfy_failed_allocation()
 502   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
 503                                              bool do_gc,
 504                                              bool clear_all_soft_refs,
 505                                              bool expect_null_mutator_alloc_region,
 506                                              bool* gc_succeeded);
 507 
 508   // Attempting to expand the heap sufficiently
 509   // to support an allocation of the given "word_size".  If
 510   // successful, perform the allocation and return the address of the
 511   // allocated block, or else "NULL".
 512   HeapWord* expand_and_allocate(size_t word_size);
 513 
 514   // Process any reference objects discovered.
 515   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 516 
 517   // If during an initial mark pause we may install a pending list head which is not
 518   // otherwise reachable ensure that it is marked in the bitmap for concurrent marking
 519   // to discover.
 520   void make_pending_list_reachable();
 521 
 522   // Merges the information gathered on a per-thread basis for all worker threads
 523   // during GC into global variables.
 524   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 525 public:
 526   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 527 
 528   WorkGang* workers() const { return _workers; }
 529 
 530   // Runs the given AbstractGangTask with the current active workers, returning the
 531   // total time taken.
 532   Tickspan run_task(AbstractGangTask* task);
 533 
 534   G1Allocator* allocator() {
 535     return _allocator;
 536   }
 537 
 538   G1HeapVerifier* verifier() {
 539     return _verifier;
 540   }
 541 
 542   G1MonitoringSupport* g1mm() {
 543     assert(_g1mm != NULL, "should have been initialized");
 544     return _g1mm;
 545   }
 546 
 547   void resize_heap_if_necessary();
 548 
 549   // Expand the garbage-first heap by at least the given size (in bytes!).
 550   // Returns true if the heap was expanded by the requested amount;
 551   // false otherwise.
 552   // (Rounds up to a HeapRegion boundary.)
 553   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
 554 
 555   // Returns the PLAB statistics for a given destination.
 556   inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
 557 
 558   // Determines PLAB size for a given destination.
 559   inline size_t desired_plab_sz(InCSetState dest);
 560 
 561   // Do anything common to GC's.
 562   void gc_prologue(bool full);
 563   void gc_epilogue(bool full);
 564 
 565   // Does the given region fulfill remembered set based eager reclaim candidate requirements?
 566   bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
 567 
 568   // Modify the reclaim candidate set and test for presence.
 569   // These are only valid for starts_humongous regions.
 570   inline void set_humongous_reclaim_candidate(uint region, bool value);
 571   inline bool is_humongous_reclaim_candidate(uint region);
 572 
 573   // Remove from the reclaim candidate set.  Also remove from the
 574   // collection set so that later encounters avoid the slow path.
 575   inline void set_humongous_is_live(oop obj);
 576 
 577   // Register the given region to be part of the collection set.
 578   inline void register_humongous_region_with_cset(uint index);
 579   // Register regions with humongous objects (actually on the start region) in
 580   // the in_cset_fast_test table.
 581   void register_humongous_regions_with_cset();
 582   // We register a region with the fast "in collection set" test. We
 583   // simply set to true the array slot corresponding to this region.
 584   void register_young_region_with_cset(HeapRegion* r) {
 585     _in_cset_fast_test.set_in_young(r->hrm_index());
 586   }
 587   void register_old_region_with_cset(HeapRegion* r) {
 588     _in_cset_fast_test.set_in_old(r->hrm_index());
 589   }
 590   void register_optional_region_with_cset(HeapRegion* r) {
 591     _in_cset_fast_test.set_optional(r->hrm_index());
 592   }
 593   void clear_in_cset(const HeapRegion* hr) {
 594     _in_cset_fast_test.clear(hr);
 595   }
 596 
 597   void clear_cset_fast_test() {
 598     _in_cset_fast_test.clear();
 599   }
 600 
 601   bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
 602 
 603   // This is called at the start of either a concurrent cycle or a Full
 604   // GC to update the number of old marking cycles started.
 605   void increment_old_marking_cycles_started();
 606 
 607   // This is called at the end of either a concurrent cycle or a Full
 608   // GC to update the number of old marking cycles completed. Those two
 609   // can happen in a nested fashion, i.e., we start a concurrent
 610   // cycle, a Full GC happens half-way through it which ends first,
 611   // and then the cycle notices that a Full GC happened and ends
 612   // too. The concurrent parameter is a boolean to help us do a bit
 613   // tighter consistency checking in the method. If concurrent is
 614   // false, the caller is the inner caller in the nesting (i.e., the
 615   // Full GC). If concurrent is true, the caller is the outer caller
 616   // in this nesting (i.e., the concurrent cycle). Further nesting is
 617   // not currently supported. The end of this call also notifies
 618   // the FullGCCount_lock in case a Java thread is waiting for a full
 619   // GC to happen (e.g., it called System.gc() with
 620   // +ExplicitGCInvokesConcurrent).
 621   void increment_old_marking_cycles_completed(bool concurrent);
 622 
 623   uint old_marking_cycles_completed() {
 624     return _old_marking_cycles_completed;
 625   }
 626 
 627   G1HRPrinter* hr_printer() { return &_hr_printer; }
 628 
 629   // Allocates a new heap region instance.
 630   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
 631 
 632   // Allocate the highest free region in the reserved heap. This will commit
 633   // regions as necessary.
 634   HeapRegion* alloc_highest_free_region();
 635 
 636   // Frees a non-humongous region by initializing its contents and
 637   // adding it to the free list that's passed as a parameter (this is
 638   // usually a local list which will be appended to the master free
 639   // list later). The used bytes of freed regions are accumulated in
 640   // pre_used. If skip_remset is true, the region's RSet will not be freed
 641   // up. If skip_hot_card_cache is true, the region's hot card cache will not
 642   // be freed up. The assumption is that this will be done later.
 643   // The locked parameter indicates if the caller has already taken
 644   // care of proper synchronization. This may allow some optimizations.
 645   void free_region(HeapRegion* hr,
 646                    FreeRegionList* free_list,
 647                    bool skip_remset,
 648                    bool skip_hot_card_cache = false,
 649                    bool locked = false);
 650 
 651   // It dirties the cards that cover the block so that the post
 652   // write barrier never queues anything when updating objects on this
 653   // block. It is assumed (and in fact we assert) that the block
 654   // belongs to a young region.
 655   inline void dirty_young_block(HeapWord* start, size_t word_size);
 656 
 657   // Frees a humongous region by collapsing it into individual regions
 658   // and calling free_region() for each of them. The freed regions
 659   // will be added to the free list that's passed as a parameter (this
 660   // is usually a local list which will be appended to the master free
 661   // list later).
 662   // The method assumes that only a single thread is ever calling
 663   // this for a particular region at once.
 664   void free_humongous_region(HeapRegion* hr,
 665                              FreeRegionList* free_list);
 666 
 667   // Facility for allocating in 'archive' regions in high heap memory and
 668   // recording the allocated ranges. These should all be called from the
 669   // VM thread at safepoints, without the heap lock held. They can be used
 670   // to create and archive a set of heap regions which can be mapped at the
 671   // same fixed addresses in a subsequent JVM invocation.
 672   void begin_archive_alloc_range(bool open = false);
 673 
 674   // Check if the requested size would be too large for an archive allocation.
 675   bool is_archive_alloc_too_large(size_t word_size);
 676 
 677   // Allocate memory of the requested size from the archive region. This will
 678   // return NULL if the size is too large or if no memory is available. It
 679   // does not trigger a garbage collection.
 680   HeapWord* archive_mem_allocate(size_t word_size);
 681 
 682   // Optionally aligns the end address and returns the allocated ranges in
 683   // an array of MemRegions in order of ascending addresses.
 684   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 685                                size_t end_alignment_in_bytes = 0);
 686 
 687   // Facility for allocating a fixed range within the heap and marking
 688   // the containing regions as 'archive'. For use at JVM init time, when the
 689   // caller may mmap archived heap data at the specified range(s).
 690   // Verify that the MemRegions specified in the argument array are within the
 691   // reserved heap.
 692   bool check_archive_addresses(MemRegion* range, size_t count);
 693 
 694   // Commit the appropriate G1 regions containing the specified MemRegions
 695   // and mark them as 'archive' regions. The regions in the array must be
 696   // non-overlapping and in order of ascending address.
 697   bool alloc_archive_regions(MemRegion* range, size_t count, bool open);
 698 
 699   // Insert any required filler objects in the G1 regions around the specified
 700   // ranges to make the regions parseable. This must be called after
 701   // alloc_archive_regions, and after class loading has occurred.
 702   void fill_archive_regions(MemRegion* range, size_t count);
 703 
 704   // For each of the specified MemRegions, uncommit the containing G1 regions
 705   // which had been allocated by alloc_archive_regions. This should be called
 706   // rather than fill_archive_regions at JVM init time if the archive file
 707   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 708   void dealloc_archive_regions(MemRegion* range, size_t count, bool is_open);
 709 
 710   oop materialize_archived_object(oop obj);
 711 
 712 private:
 713 
 714   // Shrink the garbage-first heap by at most the given size (in bytes!).
 715   // (Rounds down to a HeapRegion boundary.)
 716   void shrink(size_t expand_bytes);
 717   void shrink_helper(size_t expand_bytes);
 718 
 719   #if TASKQUEUE_STATS
 720   static void print_taskqueue_stats_hdr(outputStream* const st);
 721   void print_taskqueue_stats() const;
 722   void reset_taskqueue_stats();
 723   #endif // TASKQUEUE_STATS
 724 
 725   // Schedule the VM operation that will do an evacuation pause to
 726   // satisfy an allocation request of word_size. *succeeded will
 727   // return whether the VM operation was successful (it did do an
 728   // evacuation pause) or not (another thread beat us to it or the GC
 729   // locker was active). Given that we should not be holding the
 730   // Heap_lock when we enter this method, we will pass the
 731   // gc_count_before (i.e., total_collections()) as a parameter since
 732   // it has to be read while holding the Heap_lock. Currently, both
 733   // methods that call do_collection_pause() release the Heap_lock
 734   // before the call, so it's easy to read gc_count_before just before.
 735   HeapWord* do_collection_pause(size_t         word_size,
 736                                 uint           gc_count_before,
 737                                 bool*          succeeded,
 738                                 GCCause::Cause gc_cause);
 739 
 740   void wait_for_root_region_scanning();
 741 
 742   // The guts of the incremental collection pause, executed by the vm
 743   // thread. It returns false if it is unable to do the collection due
 744   // to the GC locker being active, true otherwise
 745   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 746 
 747   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 748   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 749   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 750 
 751   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 752 
 753   // Actually do the work of evacuating the parts of the collection set.
 754   void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
 755   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 756 private:
 757   // Evacuate the next set of optional regions.
 758   void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
 759 
 760 public:
 761   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info);
 762   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 763 
 764   void expand_heap_after_young_collection();
 765   // Update object copying statistics.
 766   void record_obj_copy_mem_stats();
 767 
 768   // The hot card cache for remembered set insertion optimization.
 769   G1HotCardCache* _hot_card_cache;
 770 
 771   // The g1 remembered set of the heap.
 772   G1RemSet* _rem_set;
 773 
 774   // A set of cards that cover the objects for which the Rsets should be updated
 775   // concurrently after the collection.
 776   G1DirtyCardQueueSet _dirty_card_queue_set;
 777 
 778   // After a collection pause, convert the regions in the collection set into free
 779   // regions.
 780   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 781 
 782   // Abandon the current collection set without recording policy
 783   // statistics or updating free lists.
 784   void abandon_collection_set(G1CollectionSet* collection_set);
 785 
 786   // The concurrent marker (and the thread it runs in.)
 787   G1ConcurrentMark* _cm;
 788   G1ConcurrentMarkThread* _cm_thread;
 789 
 790   // The concurrent refiner.
 791   G1ConcurrentRefine* _cr;
 792 
 793   // The parallel task queues
 794   RefToScanQueueSet *_task_queues;
 795 
 796   // True iff a evacuation has failed in the current collection.
 797   bool _evacuation_failed;
 798 
 799   EvacuationFailedInfo* _evacuation_failed_info_array;
 800 
 801   // Failed evacuations cause some logical from-space objects to have
 802   // forwarding pointers to themselves.  Reset them.
 803   void remove_self_forwarding_pointers();
 804 
 805   // Restore the objects in the regions in the collection set after an
 806   // evacuation failure.
 807   void restore_after_evac_failure();
 808 
 809   PreservedMarksSet _preserved_marks_set;
 810 
 811   // Preserve the mark of "obj", if necessary, in preparation for its mark
 812   // word being overwritten with a self-forwarding-pointer.
 813   void preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m);
 814 
 815 #ifndef PRODUCT
 816   // Support for forcing evacuation failures. Analogous to
 817   // PromotionFailureALot for the other collectors.
 818 
 819   // Records whether G1EvacuationFailureALot should be in effect
 820   // for the current GC
 821   bool _evacuation_failure_alot_for_current_gc;
 822 
 823   // Used to record the GC number for interval checking when
 824   // determining whether G1EvaucationFailureALot is in effect
 825   // for the current GC.
 826   size_t _evacuation_failure_alot_gc_number;
 827 
 828   // Count of the number of evacuations between failures.
 829   volatile size_t _evacuation_failure_alot_count;
 830 
 831   // Set whether G1EvacuationFailureALot should be in effect
 832   // for the current GC (based upon the type of GC and which
 833   // command line flags are set);
 834   inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
 835                                                   bool during_initial_mark,
 836                                                   bool mark_or_rebuild_in_progress);
 837 
 838   inline void set_evacuation_failure_alot_for_current_gc();
 839 
 840   // Return true if it's time to cause an evacuation failure.
 841   inline bool evacuation_should_fail();
 842 
 843   // Reset the G1EvacuationFailureALot counters.  Should be called at
 844   // the end of an evacuation pause in which an evacuation failure occurred.
 845   inline void reset_evacuation_should_fail();
 846 #endif // !PRODUCT
 847 
 848   // ("Weak") Reference processing support.
 849   //
 850   // G1 has 2 instances of the reference processor class. One
 851   // (_ref_processor_cm) handles reference object discovery
 852   // and subsequent processing during concurrent marking cycles.
 853   //
 854   // The other (_ref_processor_stw) handles reference object
 855   // discovery and processing during full GCs and incremental
 856   // evacuation pauses.
 857   //
 858   // During an incremental pause, reference discovery will be
 859   // temporarily disabled for _ref_processor_cm and will be
 860   // enabled for _ref_processor_stw. At the end of the evacuation
 861   // pause references discovered by _ref_processor_stw will be
 862   // processed and discovery will be disabled. The previous
 863   // setting for reference object discovery for _ref_processor_cm
 864   // will be re-instated.
 865   //
 866   // At the start of marking:
 867   //  * Discovery by the CM ref processor is verified to be inactive
 868   //    and it's discovered lists are empty.
 869   //  * Discovery by the CM ref processor is then enabled.
 870   //
 871   // At the end of marking:
 872   //  * Any references on the CM ref processor's discovered
 873   //    lists are processed (possibly MT).
 874   //
 875   // At the start of full GC we:
 876   //  * Disable discovery by the CM ref processor and
 877   //    empty CM ref processor's discovered lists
 878   //    (without processing any entries).
 879   //  * Verify that the STW ref processor is inactive and it's
 880   //    discovered lists are empty.
 881   //  * Temporarily set STW ref processor discovery as single threaded.
 882   //  * Temporarily clear the STW ref processor's _is_alive_non_header
 883   //    field.
 884   //  * Finally enable discovery by the STW ref processor.
 885   //
 886   // The STW ref processor is used to record any discovered
 887   // references during the full GC.
 888   //
 889   // At the end of a full GC we:
 890   //  * Enqueue any reference objects discovered by the STW ref processor
 891   //    that have non-live referents. This has the side-effect of
 892   //    making the STW ref processor inactive by disabling discovery.
 893   //  * Verify that the CM ref processor is still inactive
 894   //    and no references have been placed on it's discovered
 895   //    lists (also checked as a precondition during initial marking).
 896 
 897   // The (stw) reference processor...
 898   ReferenceProcessor* _ref_processor_stw;
 899 
 900   // During reference object discovery, the _is_alive_non_header
 901   // closure (if non-null) is applied to the referent object to
 902   // determine whether the referent is live. If so then the
 903   // reference object does not need to be 'discovered' and can
 904   // be treated as a regular oop. This has the benefit of reducing
 905   // the number of 'discovered' reference objects that need to
 906   // be processed.
 907   //
 908   // Instance of the is_alive closure for embedding into the
 909   // STW reference processor as the _is_alive_non_header field.
 910   // Supplying a value for the _is_alive_non_header field is
 911   // optional but doing so prevents unnecessary additions to
 912   // the discovered lists during reference discovery.
 913   G1STWIsAliveClosure _is_alive_closure_stw;
 914 
 915   G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;
 916 
 917   // The (concurrent marking) reference processor...
 918   ReferenceProcessor* _ref_processor_cm;
 919 
 920   // Instance of the concurrent mark is_alive closure for embedding
 921   // into the Concurrent Marking reference processor as the
 922   // _is_alive_non_header field. Supplying a value for the
 923   // _is_alive_non_header field is optional but doing so prevents
 924   // unnecessary additions to the discovered lists during reference
 925   // discovery.
 926   G1CMIsAliveClosure _is_alive_closure_cm;
 927 
 928   G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
 929 public:
 930 
 931   RefToScanQueue *task_queue(uint i) const;
 932 
 933   uint num_task_queues() const;
 934 
 935   // A set of cards where updates happened during the GC
 936   G1DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
 937 
 938   // Create a G1CollectedHeap with the specified policy.
 939   // Must call the initialize method afterwards.
 940   // May not return if something goes wrong.
 941   G1CollectedHeap(G1CollectorPolicy* policy);
 942 
 943 private:
 944   jint initialize_concurrent_refinement();
 945   jint initialize_young_gen_sampling_thread();
 946 public:
 947   // Initialize the G1CollectedHeap to have the initial and
 948   // maximum sizes and remembered and barrier sets
 949   // specified by the policy object.
 950   jint initialize();
 951 
 952   virtual void stop();
 953   virtual void safepoint_synchronize_begin();
 954   virtual void safepoint_synchronize_end();
 955 
 956   // Return the (conservative) maximum heap alignment for any G1 heap
 957   static size_t conservative_max_heap_alignment();
 958 
 959   // Does operations required after initialization has been done.
 960   void post_initialize();
 961 
 962   // Initialize weak reference processing.
 963   void ref_processing_init();
 964 
 965   virtual Name kind() const {
 966     return CollectedHeap::G1;
 967   }
 968 
 969   virtual const char* name() const {
 970     return "G1";
 971   }
 972 
 973   const G1CollectorState* collector_state() const { return &_collector_state; }
 974   G1CollectorState* collector_state() { return &_collector_state; }
 975 
 976   // The current policy object for the collector.
 977   G1Policy* policy() const { return _policy; }
 978   // The remembered set.
 979   G1RemSet* rem_set() const { return _rem_set; }
 980 
 981   inline G1GCPhaseTimes* phase_times() const;
 982 
 983   HeapRegionManager* hrm() const { return _hrm; }
 984 
 985   const G1CollectionSet* collection_set() const { return &_collection_set; }
 986   G1CollectionSet* collection_set() { return &_collection_set; }
 987 
 988   virtual CollectorPolicy* collector_policy() const;
 989 
 990   virtual SoftRefPolicy* soft_ref_policy();
 991 
 992   virtual void initialize_serviceability();
 993   virtual MemoryUsage memory_usage();
 994   virtual GrowableArray<GCMemoryManager*> memory_managers();
 995   virtual GrowableArray<MemoryPool*> memory_pools();
 996 
 997   // Try to minimize the remembered set.
 998   void scrub_rem_set();
 999 
1000   // Apply the given closure on all cards in the Hot Card Cache, emptying it.
1001   void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i);
1002 
1003   // Apply the given closure on all cards in the Dirty Card Queue Set, emptying it.
1004   void iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i);
1005 
1006   // The shared block offset table array.
1007   G1BlockOffsetTable* bot() const { return _bot; }
1008 
1009   // Reference Processing accessors
1010 
1011   // The STW reference processor....
1012   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1013 
1014   G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }
1015 
1016   // The Concurrent Marking reference processor...
1017   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1018 
1019   size_t unused_committed_regions_in_bytes() const;
1020   virtual size_t capacity() const;
1021   virtual size_t used() const;
1022   // This should be called when we're not holding the heap lock. The
1023   // result might be a bit inaccurate.
1024   size_t used_unlocked() const;
1025   size_t recalculate_used() const;
1026 
1027   // These virtual functions do the actual allocation.
1028   // Some heaps may offer a contiguous region for shared non-blocking
1029   // allocation, via inlined code (by exporting the address of the top and
1030   // end fields defining the extent of the contiguous allocation region.)
1031   // But G1CollectedHeap doesn't yet support this.
1032 
1033   virtual bool is_maximal_no_gc() const {
1034     return _hrm->available() == 0;
1035   }
1036 
1037   // Returns whether there are any regions left in the heap for allocation.
1038   bool has_regions_left_for_allocation() const {
1039     return !is_maximal_no_gc() || num_free_regions() != 0;
1040   }
1041 
1042   // The current number of regions in the heap.
1043   uint num_regions() const { return _hrm->length(); }
1044 
1045   // The max number of regions in the heap.
1046   uint max_regions() const { return _hrm->max_length(); }
1047 
1048   // Max number of regions that can be comitted.
1049   uint max_expandable_regions() const { return _hrm->max_expandable_length(); }
1050 
1051   // The number of regions that are completely free.
1052   uint num_free_regions() const { return _hrm->num_free_regions(); }
1053 
1054   // The number of regions that can be allocated into.
1055   uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1056 
1057   MemoryUsage get_auxiliary_data_memory_usage() const {
1058     return _hrm->get_auxiliary_data_memory_usage();
1059   }
1060 
1061   // The number of regions that are not completely free.
1062   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1063 
1064 #ifdef ASSERT
1065   bool is_on_master_free_list(HeapRegion* hr) {
1066     return _hrm->is_free(hr);
1067   }
1068 #endif // ASSERT
1069 
1070   inline void old_set_add(HeapRegion* hr);
1071   inline void old_set_remove(HeapRegion* hr);
1072 
1073   inline void archive_set_add(HeapRegion* hr);
1074 
1075   size_t non_young_capacity_bytes() {
1076     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1077   }
1078 
1079   // Determine whether the given region is one that we are using as an
1080   // old GC alloc region.
1081   bool is_old_gc_alloc_region(HeapRegion* hr);
1082 
1083   // Perform a collection of the heap; intended for use in implementing
1084   // "System.gc".  This probably implies as full a collection as the
1085   // "CollectedHeap" supports.
1086   virtual void collect(GCCause::Cause cause);
1087 
1088   // Perform a collection of the heap with the given cause; if the VM operation
1089   // fails to execute for any reason, retry only if retry_on_gc_failure is set.
1090   // Returns whether this collection actually executed.
1091   bool try_collect(GCCause::Cause cause, bool retry_on_gc_failure);
1092 
1093   // True iff an evacuation has failed in the most-recent collection.
1094   bool evacuation_failed() { return _evacuation_failed; }
1095 
1096   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
1097   void prepend_to_freelist(FreeRegionList* list);
1098   void decrement_summary_bytes(size_t bytes);
1099 
1100   virtual bool is_in(const void* p) const;
1101 #ifdef ASSERT
1102   // Returns whether p is in one of the available areas of the heap. Slow but
1103   // extensive version.
1104   bool is_in_exact(const void* p) const;
1105 #endif
1106 
1107   // Return "TRUE" iff the given object address is within the collection
1108   // set. Assumes that the reference points into the heap.
1109   inline bool is_in_cset(const HeapRegion *hr);
1110   inline bool is_in_cset(oop obj);
1111   inline bool is_in_cset(HeapWord* addr);
1112 
1113   inline bool is_in_cset_or_humongous(const oop obj);
1114 
1115  private:
1116   // This array is used for a quick test on whether a reference points into
1117   // the collection set or not. Each of the array's elements denotes whether the
1118   // corresponding region is in the collection set or not.
1119   G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
1120 
1121  public:
1122 
1123   inline InCSetState in_cset_state(const oop obj);
1124 
1125   // Return "TRUE" iff the given object address is in the reserved
1126   // region of g1.
1127   bool is_in_g1_reserved(const void* p) const {
1128     return _hrm->reserved().contains(p);
1129   }
1130 
1131   // Returns a MemRegion that corresponds to the space that has been
1132   // reserved for the heap
1133   MemRegion g1_reserved() const {
1134     return _hrm->reserved();
1135   }
1136 
1137   G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
1138 
1139   G1CardTable* card_table() const {
1140     return _card_table;
1141   }
1142 
1143   // Iteration functions.
1144 
1145   // Iterate over all objects, calling "cl.do_object" on each.
1146   virtual void object_iterate(ObjectClosure* cl);
1147 
1148   virtual void safe_object_iterate(ObjectClosure* cl) {
1149     object_iterate(cl);
1150   }
1151 
1152   // Iterate over heap regions, in address order, terminating the
1153   // iteration early if the "do_heap_region" method returns "true".
1154   void heap_region_iterate(HeapRegionClosure* blk) const;
1155 
1156   // Return the region with the given index. It assumes the index is valid.
1157   inline HeapRegion* region_at(uint index) const;
1158   inline HeapRegion* region_at_or_null(uint index) const;
1159 
1160   // Return the next region (by index) that is part of the same
1161   // humongous object that hr is part of.
1162   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1163 
1164   // Calculate the region index of the given address. Given address must be
1165   // within the heap.
1166   inline uint addr_to_region(HeapWord* addr) const;
1167 
1168   inline HeapWord* bottom_addr_for_region(uint index) const;
1169 
1170   // Two functions to iterate over the heap regions in parallel. Threads
1171   // compete using the HeapRegionClaimer to claim the regions before
1172   // applying the closure on them.
1173   // The _from_worker_offset version uses the HeapRegionClaimer and
1174   // the worker id to calculate a start offset to prevent all workers to
1175   // start from the point.
1176   void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
1177                                                   HeapRegionClaimer* hrclaimer,
1178                                                   uint worker_id) const;
1179 
1180   void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
1181                                           HeapRegionClaimer* hrclaimer) const;
1182 
1183   // Iterate over all regions currently in the current collection set.
1184   void collection_set_iterate_all(HeapRegionClosure* blk);
1185 
1186   // Iterate over the regions in the current increment of the collection set.
1187   // Starts the iteration so that the start regions of a given worker id over the
1188   // set active_workers are evenly spread across the set of collection set regions
1189   // to be iterated.
1190   void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id);
1191 
1192   // Returns the HeapRegion that contains addr. addr must not be NULL.
1193   template <class T>
1194   inline HeapRegion* heap_region_containing(const T addr) const;
1195 
1196   // Returns the HeapRegion that contains addr, or NULL if that is an uncommitted
1197   // region. addr must not be NULL.
1198   template <class T>
1199   inline HeapRegion* heap_region_containing_or_null(const T addr) const;
1200 
1201   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1202   // each address in the (reserved) heap is a member of exactly
1203   // one block.  The defining characteristic of a block is that it is
1204   // possible to find its size, and thus to progress forward to the next
1205   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1206   // represent Java objects, or they might be free blocks in a
1207   // free-list-based heap (or subheap), as long as the two kinds are
1208   // distinguishable and the size of each is determinable.
1209 
1210   // Returns the address of the start of the "block" that contains the
1211   // address "addr".  We say "blocks" instead of "object" since some heaps
1212   // may not pack objects densely; a chunk may either be an object or a
1213   // non-object.
1214   virtual HeapWord* block_start(const void* addr) const;
1215 
1216   // Requires "addr" to be the start of a block, and returns "TRUE" iff
1217   // the block is an object.
1218   virtual bool block_is_obj(const HeapWord* addr) const;
1219 
1220   // Section on thread-local allocation buffers (TLABs)
1221   // See CollectedHeap for semantics.
1222 
1223   bool supports_tlab_allocation() const;
1224   size_t tlab_capacity(Thread* ignored) const;
1225   size_t tlab_used(Thread* ignored) const;
1226   size_t max_tlab_size() const;
1227   size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1228 
1229   inline bool is_in_young(const oop obj);
1230 
1231   // Returns "true" iff the given word_size is "very large".
1232   static bool is_humongous(size_t word_size) {
1233     // Note this has to be strictly greater-than as the TLABs
1234     // are capped at the humongous threshold and we want to
1235     // ensure that we don't try to allocate a TLAB as
1236     // humongous and that we don't allocate a humongous
1237     // object in a TLAB.
1238     return word_size > _humongous_object_threshold_in_words;
1239   }
1240 
1241   // Returns the humongous threshold for a specific region size
1242   static size_t humongous_threshold_for(size_t region_size) {
1243     return (region_size / 2);
1244   }
1245 
1246   // Returns the number of regions the humongous object of the given word size
1247   // requires.
1248   static size_t humongous_obj_size_in_regions(size_t word_size);
1249 
1250   // Print the maximum heap capacity.
1251   virtual size_t max_capacity() const;
1252 
1253   // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
1254   virtual size_t max_reserved_capacity() const;
1255 
1256   virtual jlong millis_since_last_gc();
1257 
1258 
1259   // Convenience function to be used in situations where the heap type can be
1260   // asserted to be this type.
1261   static G1CollectedHeap* heap();
1262 
1263   void set_region_short_lived_locked(HeapRegion* hr);
1264   // add appropriate methods for any other surv rate groups
1265 
1266   const G1SurvivorRegions* survivor() const { return &_survivor; }
1267 
1268   uint eden_regions_count() const { return _eden.length(); }
1269   uint survivor_regions_count() const { return _survivor.length(); }
1270   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1271   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1272   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1273   uint old_regions_count() const { return _old_set.length(); }
1274   uint archive_regions_count() const { return _archive_set.length(); }
1275   uint humongous_regions_count() const { return _humongous_set.length(); }
1276 
1277 #ifdef ASSERT
1278   bool check_young_list_empty();
1279 #endif
1280 
1281   // *** Stuff related to concurrent marking.  It's not clear to me that so
1282   // many of these need to be public.
1283 
1284   // The functions below are helper functions that a subclass of
1285   // "CollectedHeap" can use in the implementation of its virtual
1286   // functions.
1287   // This performs a concurrent marking of the live objects in a
1288   // bitmap off to the side.
1289   void do_concurrent_mark();
1290 
1291   bool is_marked_next(oop obj) const;
1292 
1293   // Determine if an object is dead, given the object and also
1294   // the region to which the object belongs. An object is dead
1295   // iff a) it was not allocated since the last mark, b) it
1296   // is not marked, and c) it is not in an archive region.
1297   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1298     return
1299       hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) &&
1300       !hr->is_archive();
1301   }
1302 
1303   // This function returns true when an object has been
1304   // around since the previous marking and hasn't yet
1305   // been marked during this marking, and is not in an archive region.
1306   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1307     return
1308       !hr->obj_allocated_since_next_marking(obj) &&
1309       !is_marked_next(obj) &&
1310       !hr->is_archive();
1311   }
1312 
1313   // Determine if an object is dead, given only the object itself.
1314   // This will find the region to which the object belongs and
1315   // then call the region version of the same function.
1316 
1317   // Added if it is NULL it isn't dead.
1318 
1319   inline bool is_obj_dead(const oop obj) const;
1320 
1321   inline bool is_obj_ill(const oop obj) const;
1322 
1323   inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
1324   inline bool is_obj_dead_full(const oop obj) const;
1325 
1326   G1ConcurrentMark* concurrent_mark() const { return _cm; }
1327 
1328   // Refinement
1329 
1330   G1ConcurrentRefine* concurrent_refine() const { return _cr; }
1331 
1332   // Optimized nmethod scanning support routines
1333 
1334   // Register the given nmethod with the G1 heap.
1335   virtual void register_nmethod(nmethod* nm);
1336 
1337   // Unregister the given nmethod from the G1 heap.
1338   virtual void unregister_nmethod(nmethod* nm);
1339 
1340   // No nmethod flushing needed.
1341   virtual void flush_nmethod(nmethod* nm) {}
1342 
1343   // No nmethod verification implemented.
1344   virtual void verify_nmethod(nmethod* nm) {}
1345 
1346   // Free up superfluous code root memory.
1347   void purge_code_root_memory();
1348 
1349   // Rebuild the strong code root lists for each region
1350   // after a full GC.
1351   void rebuild_strong_code_roots();
1352 
1353   // Partial cleaning of VM internal data structures.
1354   void string_dedup_cleaning(BoolObjectClosure* is_alive,
1355                              OopClosure* keep_alive,
1356                              G1GCPhaseTimes* phase_times = NULL);
1357 
1358   // Performs cleaning of data structures after class unloading.
1359   void complete_cleaning(BoolObjectClosure* is_alive, bool class_unloading_occurred);
1360 
1361   // Redirty logged cards in the refinement queue.
1362   void redirty_logged_cards();
1363   // Verification
1364 
1365   // Deduplicate the string
1366   virtual void deduplicate_string(oop str);
1367 
1368   // Perform any cleanup actions necessary before allowing a verification.
1369   virtual void prepare_for_verify();
1370 
1371   // Perform verification.
1372 
1373   // vo == UsePrevMarking -> use "prev" marking information,
1374   // vo == UseNextMarking -> use "next" marking information
1375   // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
1376   //
1377   // NOTE: Only the "prev" marking information is guaranteed to be
1378   // consistent most of the time, so most calls to this should use
1379   // vo == UsePrevMarking.
1380   // Currently, there is only one case where this is called with
1381   // vo == UseNextMarking, which is to verify the "next" marking
1382   // information at the end of remark.
1383   // Currently there is only one place where this is called with
1384   // vo == UseFullMarking, which is to verify the marking during a
1385   // full GC.
1386   void verify(VerifyOption vo);
1387 
1388   // WhiteBox testing support.
1389   virtual bool supports_concurrent_phase_control() const;
1390   virtual bool request_concurrent_phase(const char* phase);
1391   bool is_heterogeneous_heap() const;
1392 
1393   virtual WorkGang* get_safepoint_workers() { return _workers; }
1394 
1395   // The methods below are here for convenience and dispatch the
1396   // appropriate method depending on value of the given VerifyOption
1397   // parameter. The values for that parameter, and their meanings,
1398   // are the same as those above.
1399 
1400   bool is_obj_dead_cond(const oop obj,
1401                         const HeapRegion* hr,
1402                         const VerifyOption vo) const;
1403 
1404   bool is_obj_dead_cond(const oop obj,
1405                         const VerifyOption vo) const;
1406 
1407   G1HeapSummary create_g1_heap_summary();
1408   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1409 
1410   // Printing
1411 private:
1412   void print_heap_regions() const;
1413   void print_regions_on(outputStream* st) const;
1414 
1415 public:
1416   virtual void print_on(outputStream* st) const;
1417   virtual void print_extended_on(outputStream* st) const;
1418   virtual void print_on_error(outputStream* st) const;
1419 
1420   virtual void print_gc_threads_on(outputStream* st) const;
1421   virtual void gc_threads_do(ThreadClosure* tc) const;
1422 
1423   // Override
1424   void print_tracing_info() const;
1425 
1426   // The following two methods are helpful for debugging RSet issues.
1427   void print_cset_rsets() PRODUCT_RETURN;
1428   void print_all_rsets() PRODUCT_RETURN;
1429 
1430   size_t pending_card_num();
1431 };
1432 
1433 class G1ParEvacuateFollowersClosure : public VoidClosure {
1434 private:
1435   double _start_term;
1436   double _term_time;
1437   size_t _term_attempts;
1438 
1439   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1440   void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
1441 protected:
1442   G1CollectedHeap*              _g1h;
1443   G1ParScanThreadState*         _par_scan_state;
1444   RefToScanQueueSet*            _queues;
1445   ParallelTaskTerminator*       _terminator;
1446   G1GCPhaseTimes::GCParPhases   _phase;
1447 
1448   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1449   RefToScanQueueSet*      queues()         { return _queues; }
1450   ParallelTaskTerminator* terminator()     { return _terminator; }
1451 
1452 public:
1453   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1454                                 G1ParScanThreadState* par_scan_state,
1455                                 RefToScanQueueSet* queues,
1456                                 ParallelTaskTerminator* terminator,
1457                                 G1GCPhaseTimes::GCParPhases phase)
1458     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1459       _g1h(g1h), _par_scan_state(par_scan_state),
1460       _queues(queues), _terminator(terminator), _phase(phase) {}
1461 
1462   void do_void();
1463 
1464   double term_time() const { return _term_time; }
1465   size_t term_attempts() const { return _term_attempts; }
1466 
1467 private:
1468   inline bool offer_termination();
1469 };
1470 
1471 #endif // SHARE_GC_G1_G1COLLECTEDHEAP_HPP