/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_HPP

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1EdenRegions.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1EvacStats.hpp"
#include "gc/g1/g1EvacuationInfo.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HRPrinter.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/heapRegionManager.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "memory/memRegion.hpp"
#include "utilities/stack.hpp"

// A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

// Forward declarations
class HeapRegion;
class GenerationSpec;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
class MemoryPool;
class MemoryManager;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CardTableEntryClosure;
class G1CollectionSet;
class G1Policy;
class G1HotCardCache;
class G1RemSet;
class G1YoungRemSetSamplingThread;
class G1ConcurrentMark;
class G1ConcurrentMarkThread;
class G1ConcurrentRefine;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class EvacuationFailedInfo;
class nmethod;
class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1FullGCScope;
class G1HeapVerifier;
class G1HeapSizingPolicy;
class G1HeapSummary;
class G1EvacSummary;

typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

// The G1 STW is alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during
// reference processing in STW evacuation pauses.
class G1STWIsAliveClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  bool do_object_b(oop p);
};

class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  bool do_object_b(oop p);
};

class G1RegionMappingChangedListener : public G1MappingChangedListener {
 private:
  void reset_from_card_cache(uint start_idx, size_t num_regions);
 public:
  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

class G1CollectedHeap : public CollectedHeap {
  friend class VM_CollectForMetadataAllocation;
  friend class VM_G1CollectForAllocation;
  friend class VM_G1CollectFull;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class G1FullCollector;
  friend class G1GCAllocRegion;
  friend class G1HeapVerifier;

  // Closures used in implementation.
  friend class G1ParScanThreadState;
  friend class G1ParScanThreadStateSet;
  friend class G1EvacuateRegionsTask;
  friend class G1PLABAllocator;

  // Other related classes.
  friend class HeapRegionClaimer;

  // Testing classes.
  friend class G1CheckRegionAttrTableClosure;

private:
  G1YoungRemSetSamplingThread* _young_gen_sampling_thread;

  WorkGang* _workers;
  G1CardTable* _card_table;

  SoftRefPolicy      _soft_ref_policy;

  static size_t _humongous_object_threshold_in_words;

  // These sets keep track of old, archive and humongous regions respectively.
  HeapRegionSet _old_set;
  HeapRegionSet _archive_set;
  HeapRegionSet _humongous_set;

  void eagerly_reclaim_humongous_regions();
  // Start a new incremental collection set for the next pause.
  void start_new_collection_set();

  // The block offset table for the G1 heap.
  G1BlockOffsetTable* _bot;

  // Tears down the region sets / lists so that they are empty and the
  // regions on the heap do not belong to a region set / list. The
  // only exception is the humongous set which we leave unaltered. If
  // free_list_only is true, it will only tear down the master free
  // list. It is called before a Full GC (free_list_only == false) or
  // before heap shrinking (free_list_only == true).
  void tear_down_region_sets(bool free_list_only);

  // Rebuilds the region sets / lists so that they are repopulated to
  // reflect the contents of the heap. The only exception is the
  // humongous set which was not torn down in the first place. If
  // free_list_only is true, it will only rebuild the master free
  // list. It is called after a Full GC (free_list_only == false) or
  // after heap shrinking (free_list_only == true).
  void rebuild_region_sets(bool free_list_only);

  // Callback for region mapping changed events.
  G1RegionMappingChangedListener _listener;

  // Handle G1 NUMA support.
  G1NUMA* _numa;

  // The sequence of all heap regions in the heap.
  HeapRegionManager* _hrm;

  // Manages all allocations within regions, except for humongous object allocations.
  G1Allocator* _allocator;

  // Manages all heap verification.
  G1HeapVerifier* _verifier;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region(s).
  volatile size_t _summary_bytes_used;

  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);

  void set_used(size_t bytes);

  // Class that handles archive allocation ranges.
  G1ArchiveAllocator* _archive_allocator;

  // GC allocation statistics policy for survivors.
  G1EvacStats _survivor_evac_stats;

  // GC allocation statistics policy for tenured objects.
  G1EvacStats _old_evac_stats;

  // It specifies whether we should attempt to expand the heap after a
  // region allocation failure. If heap expansion fails we set this to
  // false so that we don't re-attempt the heap expansion (it's likely
  // that subsequent expansion attempts will also fail if one fails).
  // Currently, it is only consulted during GC and it's reset at the
  // start of each GC.
  bool _expand_heap_after_alloc_failure;

  // Helper for monitoring and management support.
  G1MonitoringSupport* _g1mm;

  // Records whether the region at the given index is (still) a
  // candidate for eager reclaim.  Only valid for humongous start
  // regions; other regions have unspecified values.  Humongous start
  // regions are initialized at start of collection pause, with
  // candidates removed from the set as they are found reachable from
  // roots or the young generation.
  class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
   protected:
    bool default_value() const { return false; }
   public:
    void clear() { G1BiasedMappedArray<bool>::clear(); }
    void set_candidate(uint region, bool value) {
      set_by_index(region, value);
    }
    bool is_candidate(uint region) {
      return get_by_index(region);
    }
  };
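
  // Illustrative usage sketch for the candidate set above (not actual
  // pause code): at pause setup a humongous start region with index idx
  // is registered as a candidate, and later cleared if found reachable:
  //
  //   _humongous_reclaim_candidates.set_candidate(idx, true);
  //   ...
  //   if (_humongous_reclaim_candidates.is_candidate(idx)) {
  //     _humongous_reclaim_candidates.set_candidate(idx, false);
  //   }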

  HumongousReclaimCandidates _humongous_reclaim_candidates;
  // Stores whether during humongous object registration we found candidate regions.
  // If not, we can skip a few steps.
  bool _has_humongous_reclaim_candidates;

  G1HRPrinter _hr_printer;

  // It decides whether an explicit GC should start a concurrent cycle
  // instead of doing a STW GC. Currently, a concurrent cycle is
  // explicitly started if:
  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
  // (b) cause == _g1_humongous_allocation,
  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent, or
  // (e) cause == _wb_conc_mark.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);

  // Return true if should upgrade to full gc after an incremental one.
  bool should_upgrade_to_full_gc(GCCause::Cause cause);

  // indicates whether we are in young or mixed GC mode
  G1CollectorState _collector_state;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have started.
  volatile uint _old_marking_cycles_started;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have completed.
  volatile uint _old_marking_cycles_completed;

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
  // allocating a number of dead regions. This way we can induce very
  // frequent marking cycles and stress the cleanup / concurrent
  // cleanup code more (as all the regions that will be allocated by
  // this method will be found dead by the marking cycle).
  void allocate_dummy_regions() PRODUCT_RETURN;

  // If the HR printer is active, dump the state of the regions in the
  // heap after a compaction.
  void print_hrm_post_compaction();

  // Create a memory mapper for auxiliary data structures of the given size and
  // translation factor.
  static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
                                                         size_t size,
                                                         size_t translation_factor);

  void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // These are macros so that, if the assert fires, we get the correct
  // line number, file, etc.

#define heap_locking_asserts_params(_extra_message_)                          \
  "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
  (_extra_message_),                                                          \
  BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
  BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
  BOOL_TO_STR(Thread::current()->is_VM_thread())

#define assert_heap_locked()                                                  \
  do {                                                                        \
    assert(Heap_lock->owned_by_self(),                                        \
           heap_locking_asserts_params("should be holding the Heap_lock"));   \
  } while (0)

#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() ||                                      \
           (SafepointSynchronize::is_at_safepoint() &&                        \
             ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
           heap_locking_asserts_params("should be holding the Heap_lock or "  \
                                        "should be at a safepoint"));         \
  } while (0)

#define assert_heap_locked_and_not_at_safepoint()                             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() &&                                      \
                                    !SafepointSynchronize::is_at_safepoint(), \
          heap_locking_asserts_params("should be holding the Heap_lock and "  \
                                       "should not be at a safepoint"));      \
  } while (0)

#define assert_heap_not_locked()                                              \
  do {                                                                        \
    assert(!Heap_lock->owned_by_self(),                                       \
        heap_locking_asserts_params("should not be holding the Heap_lock"));  \
  } while (0)

#define assert_heap_not_locked_and_not_at_safepoint()                         \
  do {                                                                        \
    assert(!Heap_lock->owned_by_self() &&                                     \
                                    !SafepointSynchronize::is_at_safepoint(), \
      heap_locking_asserts_params("should not be holding the Heap_lock and "  \
                                   "should not be at a safepoint"));          \
  } while (0)

#define assert_at_safepoint_on_vm_thread()                                    \
  do {                                                                        \
    assert_at_safepoint();                                                    \
    assert(Thread::current_or_null() != NULL, "no current thread");           \
    assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
  } while (0)

#ifdef ASSERT
#define assert_used_and_recalculate_used_equal(g1h)                           \
  do {                                                                        \
    size_t cur_used_bytes = g1h->used();                                      \
    size_t recal_used_bytes = g1h->recalculate_used();                        \
    assert(cur_used_bytes == recal_used_bytes, "Used(" SIZE_FORMAT ") is not" \
           " same as recalculated used(" SIZE_FORMAT ").",                    \
           cur_used_bytes, recal_used_bytes);                                 \
  } while (0)
#else
#define assert_used_and_recalculate_used_equal(g1h) do {} while(0)
#endif
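
// Usage sketch for the locking asserts above (illustrative only;
// "example_slow_alloc" is a hypothetical method, not part of this class):
//
//   HeapWord* G1CollectedHeap::example_slow_alloc(size_t word_size) {
//     assert_heap_not_locked_and_not_at_safepoint();
//     MutexLocker ml(Heap_lock);
//     assert_heap_locked();
//     // ... allocate while holding the Heap_lock ...
//   }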

  const char* young_gc_name() const;

  // The young region list.
  G1EdenRegions _eden;
  G1SurvivorRegions _survivor;

  STWGCTimer* _gc_timer_stw;

  G1NewTracer* _gc_tracer_stw;

  // The current policy object for the collector.
  G1Policy* _policy;
  G1HeapSizingPolicy* _heap_sizing_policy;

  G1CollectionSet _collection_set;

  // Try to allocate a single non-humongous HeapRegion sufficient for
  // an allocation of the given word_size. If do_expand is true,
  // attempt to expand the heap if necessary to satisfy the allocation
  // request. 'type' specifies the type of region to be allocated. (Use
  // constants Old, Eden, Humongous, Survivor defined in HeapRegionType.)
  HeapRegion* new_region(size_t word_size,
                         HeapRegionType type,
                         bool do_expand,
                         uint node_index = G1NUMA::AnyNodeIndex);

  // Initialize a contiguous set of free regions of length num_regions
  // and starting at index first so that they appear as a single
  // humongous region.
  HeapWord* humongous_obj_allocate_initialize_regions(uint first,
                                                      uint num_regions,
                                                      size_t word_size);

  // Attempt to allocate a humongous object of the given size. Return
  // NULL if unsuccessful.
  HeapWord* humongous_obj_allocate(size_t word_size);

  // The following two methods, allocate_new_tlab() and
  // mem_allocate(), are the two main entry points from the runtime
  // into the G1's allocation routines. They have the following
  // assumptions:
  //
  // * They should both be called outside safepoints.
  //
  // * They should both be called without holding the Heap_lock.
  //
  // * All allocation requests for new TLABs should go to
  //   allocate_new_tlab().
  //
  // * All non-TLAB allocation requests should go to mem_allocate().
  //
  // * If either call cannot satisfy the allocation request using the
  //   current allocating region, they will try to get a new one. If
  //   this fails, they will attempt to do an evacuation pause and
  //   retry the allocation.
  //
  // * If all allocation attempts fail, even after trying to schedule
  //   an evacuation pause, allocate_new_tlab() will return NULL,
  //   whereas mem_allocate() will attempt a heap expansion and/or
  //   schedule a Full GC.
  //
  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
  //   should never be called with word_size being humongous. All
  //   humongous allocation requests should go to mem_allocate() which
  //   will satisfy them with a special path.

  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);

  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool*  gc_overhead_limit_was_exceeded);
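
  // Conceptual caller-side sketch of the contract above (illustrative
  // only; this is not actual runtime code and it ignores access control):
  //
  //   size_t actual_size;
  //   HeapWord* tlab = allocate_new_tlab(min_size, requested_size, &actual_size);
  //   if (tlab == NULL) {
  //     // No humongous TLABs: a humongous request must use mem_allocate().
  //     bool overhead_limit_exceeded;
  //     HeapWord* obj = mem_allocate(word_size, &overhead_limit_exceeded);
  //   }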

  // First-level mutator allocation attempt: try to allocate out of
  // the mutator alloc region without taking the Heap_lock. This
  // should only be used for non-humongous allocations.
  inline HeapWord* attempt_allocation(size_t min_word_size,
                                      size_t desired_word_size,
                                      size_t* actual_word_size);

  // Second-level mutator allocation attempt: take the Heap_lock and
  // retry the allocation attempt, potentially scheduling a GC
  // pause. This should only be used for non-humongous allocations.
  HeapWord* attempt_allocation_slow(size_t word_size);

  // Takes the Heap_lock and attempts a humongous allocation. It can
  // potentially schedule a GC pause.
  HeapWord* attempt_allocation_humongous(size_t word_size);

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            bool expect_null_mutator_alloc_region);

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force, uint node_index);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  bool has_more_regions(G1HeapRegionAttr dest);
  HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, G1HeapRegionAttr dest);

  // - if explicit_gc is true, the GC was requested explicitly (e.g., System.gc()),
  //   otherwise it's for a failed allocation.
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC.
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise.
  bool do_full_collection(bool explicit_gc,
                          bool clear_all_soft_refs);

  // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t word_size,
                                      bool* succeeded);
  // Internal helpers used during full GC to split it up to
  // increase readability.
  void abort_concurrent_cycle();
  void verify_before_full_collection(bool explicit_gc);
  void prepare_heap_for_full_collection();
  void prepare_heap_for_mutators();
  void abort_refinement();
  void verify_after_full_collection();
  void print_heap_after_full_collection(G1HeapTransition* heap_transition);

  // Helper method for satisfy_failed_allocation()
  HeapWord* satisfy_failed_allocation_helper(size_t word_size,
                                             bool do_gc,
                                             bool clear_all_soft_refs,
                                             bool expect_null_mutator_alloc_region,
                                             bool* gc_succeeded);

  // Attempt to expand the heap sufficiently to support an allocation
  // of the given "word_size". If successful, perform the allocation
  // and return the address of the allocated block, or else "NULL".
  HeapWord* expand_and_allocate(size_t word_size);

  // Process any reference objects discovered.
  void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);

  // If during an initial mark pause we install a pending list head which is
  // not otherwise reachable, ensure that it is marked in the bitmap for
  // concurrent marking to discover.
  void make_pending_list_reachable();

  // Merges the information gathered on a per-thread basis for all worker threads
  // during GC into global variables.
  void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
public:
  G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }

  WorkGang* workers() const { return _workers; }

  // Runs the given AbstractGangTask with the current active workers, returning the
  // total time taken.
  Tickspan run_task(AbstractGangTask* task);

  G1Allocator* allocator() {
    return _allocator;
  }

  G1HeapVerifier* verifier() {
    return _verifier;
  }

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }

  void resize_heap_if_necessary();

  G1NUMA* numa() const { return _numa; }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
  bool expand_single_region(uint node_index);
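
  // Sizing sketch (illustrative): since expand() rounds the request up to
  // a HeapRegion boundary, with HeapRegion::GrainBytes == 2*M a call like
  //   expand(3*M);
  // commits two whole regions (4M in total), assuming sufficient reserved
  // space remains and the commit succeeds.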

  // Returns the PLAB statistics for a given destination.
  inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);

  // Determines PLAB size for a given destination.
  inline size_t desired_plab_sz(G1HeapRegionAttr dest);

  // Do anything common to GCs.
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  // Does the given region fulfill remembered set based eager reclaim candidate requirements?
  bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;

  // Modify the reclaim candidate set and test for presence.
  // These are only valid for starts_humongous regions.
  inline void set_humongous_reclaim_candidate(uint region, bool value);
  inline bool is_humongous_reclaim_candidate(uint region);

  // Remove from the reclaim candidate set.  Also remove from the
  // collection set so that later encounters avoid the slow path.
  inline void set_humongous_is_live(oop obj);

  // Register the given region to be part of the collection set.
  inline void register_humongous_region_with_region_attr(uint index);
  // Update region attributes table with information about all regions.
  void register_regions_with_region_attr();
  // We register a region with the fast "in collection set" test. We
  // simply set to true the array slot corresponding to this region.
  void register_young_region_with_region_attr(HeapRegion* r) {
    _region_attr.set_in_young(r->hrm_index());
  }
  inline void register_region_with_region_attr(HeapRegion* r);
  inline void register_old_region_with_region_attr(HeapRegion* r);
  inline void register_optional_region_with_region_attr(HeapRegion* r);

  void clear_region_attr(const HeapRegion* hr) {
    _region_attr.clear(hr);
  }

  void clear_region_attr() {
    _region_attr.clear();
  }

  // Verify that the G1RegionAttr remset tracking corresponds to actual remset tracking
  // for all regions.
  void verify_region_attr_remset_update() PRODUCT_RETURN;

  bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);

  // This is called at the start of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles started.
  void increment_old_marking_cycles_started();

  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too. The concurrent parameter is a boolean to help us do a bit
  // tighter consistency checking in the method. If concurrent is
  // false, the caller is the inner caller in the nesting (i.e., the
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the FullGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_old_marking_cycles_completed(bool concurrent);

  uint old_marking_cycles_completed() {
    return _old_marking_cycles_completed;
  }
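
  // Nesting sketch for the "concurrent" parameter above (illustrative,
  // following the comment): a concurrent cycle starts; a Full GC starts
  // half-way through it and ends first, calling
  //   increment_old_marking_cycles_completed(false /* inner caller */);
  // the concurrent cycle then notices the Full GC and ends too, calling
  //   increment_old_marking_cycles_completed(true /* outer caller */);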

  G1HRPrinter* hr_printer() { return &_hr_printer; }

  // Allocates a new heap region instance.
  HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);

  // Allocate the highest free region in the reserved heap. This will commit
  // regions as necessary.
  HeapRegion* alloc_highest_free_region();

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is
  // usually a local list which will be appended to the master free
  // list later). If skip_remset is true, the region's RSet will not be freed
  // up. If skip_hot_card_cache is true, the region's hot card cache will not
  // be freed up. The assumption is that this will be done later.
  // The locked parameter indicates if the caller has already taken
  // care of proper synchronization. This may allow some optimizations.
  void free_region(HeapRegion* hr,
                   FreeRegionList* free_list,
                   bool skip_remset,
                   bool skip_hot_card_cache = false,
                   bool locked = false);

  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);

  // Frees a humongous region by collapsing it into individual regions
  // and calling free_region() for each of them. The freed regions
  // will be added to the free list that's passed as a parameter (this
  // is usually a local list which will be appended to the master free
  // list later).
  // The method assumes that only a single thread is ever calling
  // this for a particular region at once.
  void free_humongous_region(HeapRegion* hr,
                             FreeRegionList* free_list);

  // Facility for allocating in 'archive' regions in high heap memory and
  // recording the allocated ranges. These should all be called from the
  // VM thread at safepoints, without the heap lock held. They can be used
  // to create and archive a set of heap regions which can be mapped at the
  // same fixed addresses in a subsequent JVM invocation.
  void begin_archive_alloc_range(bool open = false);

  // Check if the requested size would be too large for an archive allocation.
  bool is_archive_alloc_too_large(size_t word_size);

  // Allocate memory of the requested size from the archive region. This will
  // return NULL if the size is too large or if no memory is available. It
  // does not trigger a garbage collection.
  HeapWord* archive_mem_allocate(size_t word_size);

  // Optionally aligns the end address and returns the allocated ranges in
  // an array of MemRegions in order of ascending addresses.
  void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
                               size_t end_alignment_in_bytes = 0);
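
  // Protocol sketch for the archive-allocation facility above (illustrative;
  // error handling omitted). All three calls are made by the VM thread at a
  // safepoint, without the heap lock held:
  //
  //   begin_archive_alloc_range();
  //   HeapWord* p = archive_mem_allocate(word_size); // NULL if it fails
  //   GrowableArray<MemRegion> ranges;
  //   end_archive_alloc_range(&ranges);              // default end alignment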

  // Facility for allocating a fixed range within the heap and marking
  // the containing regions as 'archive'. For use at JVM init time, when the
  // caller may mmap archived heap data at the specified range(s).
  // Verify that the MemRegions specified in the argument array are within the
  // reserved heap.
  bool check_archive_addresses(MemRegion* range, size_t count);

  // Commit the appropriate G1 regions containing the specified MemRegions
  // and mark them as 'archive' regions. The regions in the array must be
  // non-overlapping and in order of ascending address.
  bool alloc_archive_regions(MemRegion* range, size_t count, bool open);

  // Insert any required filler objects in the G1 regions around the specified
  // ranges to make the regions parseable. This must be called after
  // alloc_archive_regions, and after class loading has occurred.
  void fill_archive_regions(MemRegion* range, size_t count);

  // For each of the specified MemRegions, uncommit the containing G1 regions
  // which had been allocated by alloc_archive_regions. This should be called
  // rather than fill_archive_regions at JVM init time if the archive file
  // mapping failed, with the same non-overlapping and sorted MemRegion array.
  void dealloc_archive_regions(MemRegion* range, size_t count, bool is_open);

  oop materialize_archived_object(oop obj);

private:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a HeapRegion boundary.)
  void shrink(size_t expand_bytes);
  void shrink_helper(size_t expand_bytes);

  #if TASKQUEUE_STATS
  static void print_taskqueue_stats_hdr(outputStream* const st);
  void print_taskqueue_stats() const;
  void reset_taskqueue_stats();
  #endif // TASKQUEUE_STATS

  // Schedule the VM operation that will do an evacuation pause to
  // satisfy an allocation request of word_size. *succeeded will
  // return whether the VM operation was successful (it did do an
  // evacuation pause) or not (another thread beat us to it or the GC
  // locker was active). Given that we should not be holding the
  // Heap_lock when we enter this method, we will pass the
  // gc_count_before (i.e., total_collections()) as a parameter since
  // it has to be read while holding the Heap_lock. Currently, both
  // methods that call do_collection_pause() release the Heap_lock
  // before the call, so it's easy to read gc_count_before just before.
  HeapWord* do_collection_pause(size_t         word_size,
                                uint           gc_count_before,
                                bool*          succeeded,
                                GCCause::Cause gc_cause);

  void wait_for_root_region_scanning();

  // The guts of the incremental collection pause, executed by the VM
  // thread. It returns false if it is unable to do the collection due
  // to the GC locker being active, true otherwise.
  bool do_collection_pause_at_safepoint(double target_pause_time_ms);

  G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
  void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
  void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);

  void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);

  // Actually do the work of evacuating the parts of the collection set.
  void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
  void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
private:
  // Evacuate the next set of optional regions.
  void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);

public:
  void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
  void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
                                    G1RedirtyCardsQueueSet* rdcqs,
                                    G1ParScanThreadStateSet* pss);

  void expand_heap_after_young_collection();
  // Update object copying statistics.
  void record_obj_copy_mem_stats();

  // The hot card cache for remembered set insertion optimization.
  G1HotCardCache* _hot_card_cache;

  // The g1 remembered set of the heap.
  G1RemSet* _rem_set;

  // After a collection pause, convert the regions in the collection set into free
  // regions.
  void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);

  // Abandon the current collection set without recording policy
  // statistics or updating free lists.
  void abandon_collection_set(G1CollectionSet* collection_set);

  // The concurrent marker (and the thread it runs in.)
  G1ConcurrentMark* _cm;
  G1ConcurrentMarkThread* _cm_thread;

  // The concurrent refiner.
  G1ConcurrentRefine* _cr;

  // The parallel task queues
  RefToScanQueueSet *_task_queues;

  // True iff an evacuation has failed in the current collection.
  bool _evacuation_failed;

  EvacuationFailedInfo* _evacuation_failed_info_array;

  // Failed evacuations cause some logical from-space objects to have
  // forwarding pointers to themselves.  Reset them.
  void remove_self_forwarding_pointers(G1RedirtyCardsQueueSet* rdcqs);

  // Restore the objects in the regions in the collection set after an
  // evacuation failure.
  void restore_after_evac_failure(G1RedirtyCardsQueueSet* rdcqs);

  PreservedMarksSet _preserved_marks_set;

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m);

#ifndef PRODUCT
  // Support for forcing evacuation failures. Analogous to
  // PromotionFailureALot for the other collectors.

  // Records whether G1EvacuationFailureALot should be in effect
  // for the current GC
  bool _evacuation_failure_alot_for_current_gc;

  // Used to record the GC number for interval checking when
  // determining whether G1EvacuationFailureALot is in effect
  // for the current GC.
  size_t _evacuation_failure_alot_gc_number;

  // Count of the number of evacuations between failures.
  volatile size_t _evacuation_failure_alot_count;

  // Set whether G1EvacuationFailureALot should be in effect
  // for the current GC (based upon the type of GC and which
  // command line flags are set).
  inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                  bool during_initial_mark,
                                                  bool mark_or_rebuild_in_progress);

  inline void set_evacuation_failure_alot_for_current_gc();

  // Return true if it's time to cause an evacuation failure.
  inline bool evacuation_should_fail();

  // Reset the G1EvacuationFailureALot counters.  Should be called at
  // the end of an evacuation pause in which an evacuation failure occurred.
  inline void reset_evacuation_should_fail();
#endif // !PRODUCT

  // ("Weak") Reference processing support.
  //
  // G1 has 2 instances of the reference processor class. One
  // (_ref_processor_cm) handles reference object discovery
  // and subsequent processing during concurrent marking cycles.
  //
  // The other (_ref_processor_stw) handles reference object
  // discovery and processing during full GCs and incremental
  // evacuation pauses.
  //
  // During an incremental pause, reference discovery will be
  // temporarily disabled for _ref_processor_cm and will be
  // enabled for _ref_processor_stw. At the end of the evacuation
  // pause references discovered by _ref_processor_stw will be
  // processed and discovery will be disabled. The previous
  // setting for reference object discovery for _ref_processor_cm
  // will be re-instated.
  //
  // At the start of marking:
  //  * Discovery by the CM ref processor is verified to be inactive
  //    and its discovered lists are empty.
  //  * Discovery by the CM ref processor is then enabled.
  //
  // At the end of marking:
  //  * Any references on the CM ref processor's discovered
  //    lists are processed (possibly MT).
  //
  // At the start of full GC we:
  //  * Disable discovery by the CM ref processor and
  //    empty CM ref processor's discovered lists
  //    (without processing any entries).
  //  * Verify that the STW ref processor is inactive and its
  //    discovered lists are empty.
  //  * Temporarily set STW ref processor discovery as single threaded.
  //  * Temporarily clear the STW ref processor's _is_alive_non_header
  //    field.
  //  * Finally enable discovery by the STW ref processor.
  //
  // The STW ref processor is used to record any discovered
  // references during the full GC.
  //
  // At the end of a full GC we:
  //  * Enqueue any reference objects discovered by the STW ref processor
  //    that have non-live referents. This has the side-effect of
  //    making the STW ref processor inactive by disabling discovery.
  //  * Verify that the CM ref processor is still inactive
  //    and no references have been placed on its discovered
  //    lists (also checked as a precondition during initial marking).
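  //
  // Condensed timeline of the discovery toggling described above for an
  // incremental evacuation pause (illustrative):
  //
  //   pause start: discovery disabled for _ref_processor_cm,
  //                enabled for _ref_processor_stw
  //   pause end:   references discovered by _ref_processor_stw are
  //                processed, its discovery is disabled, and the previous
  //                discovery setting of _ref_processor_cm is re-instated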

  // The (stw) reference processor...
  ReferenceProcessor* _ref_processor_stw;

  // During reference object discovery, the _is_alive_non_header
  // closure (if non-null) is applied to the referent object to
  // determine whether the referent is live. If so then the
  // reference object does not need to be 'discovered' and can
  // be treated as a regular oop. This has the benefit of reducing
  // the number of 'discovered' reference objects that need to
  // be processed.
  //
  // Instance of the is_alive closure for embedding into the
  // STW reference processor as the _is_alive_non_header field.
  // Supplying a value for the _is_alive_non_header field is
  // optional but doing so prevents unnecessary additions to
  // the discovered lists during reference discovery.
  G1STWIsAliveClosure _is_alive_closure_stw;

  G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;

  // The (concurrent marking) reference processor...
  ReferenceProcessor* _ref_processor_cm;

  // Instance of the concurrent mark is_alive closure for embedding
  // into the Concurrent Marking reference processor as the
  // _is_alive_non_header field. Supplying a value for the
  // _is_alive_non_header field is optional but doing so prevents
  // unnecessary additions to the discovered lists during reference
  // discovery.
  G1CMIsAliveClosure _is_alive_closure_cm;

  G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
public:

  RefToScanQueue *task_queue(uint i) const;

  uint num_task_queues() const;

  // Create a G1CollectedHeap.
  // Must call the initialize method afterwards.
  // May not return if something goes wrong.
  G1CollectedHeap();

private:
  jint initialize_concurrent_refinement();
  jint initialize_young_gen_sampling_thread();
public:
  // Initialize the G1CollectedHeap to have the initial and
  // maximum sizes and remembered and barrier sets
  // specified by the policy object.
  jint initialize();

  virtual void stop();
  virtual void safepoint_synchronize_begin();
  virtual void safepoint_synchronize_end();

  // Does operations required after initialization has been done.
  void post_initialize();

  // Initialize weak reference processing.
  void ref_processing_init();

  virtual Name kind() const {
    return CollectedHeap::G1;
  }

  virtual const char* name() const {
    return "G1";
  }

  const G1CollectorState* collector_state() const { return &_collector_state; }
  G1CollectorState* collector_state() { return &_collector_state; }

  // The current policy object for the collector.
  G1Policy* policy() const { return _policy; }
  // The remembered set.
  G1RemSet* rem_set() const { return _rem_set; }

  inline G1GCPhaseTimes* phase_times() const;

  HeapRegionManager* hrm() const { return _hrm; }

  const G1CollectionSet* collection_set() const { return &_collection_set; }
  G1CollectionSet* collection_set() { return &_collection_set; }

  virtual SoftRefPolicy* soft_ref_policy();

  virtual void initialize_serviceability();
  virtual MemoryUsage memory_usage();
  virtual GrowableArray<GCMemoryManager*> memory_managers();
  virtual GrowableArray<MemoryPool*> memory_pools();

  // Try to minimize the remembered set.
  void scrub_rem_set();

  // Apply the given closure on all cards in the Hot Card Cache, emptying it.
  void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id);

  // The shared block offset table array.
  G1BlockOffsetTable* bot() const { return _bot; }

  // Reference Processing accessors

  // The STW reference processor....
  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

  G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }

  // The Concurrent Marking reference processor...
  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }

  size_t unused_committed_regions_in_bytes() const;

  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;

  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  virtual bool is_maximal_no_gc() const {
    return _hrm->available() == 0;
  }

  // Returns whether there are any regions left in the heap for allocation.
  bool has_regions_left_for_allocation() const {
    return !is_maximal_no_gc() || num_free_regions() != 0;
  }

  // The current number of regions in the heap.
  uint num_regions() const { return _hrm->length(); }

  // The max number of regions in the heap.
  uint max_regions() const { return _hrm->max_length(); }

  // Max number of regions that can be committed.
  uint max_expandable_regions() const { return _hrm->max_expandable_length(); }

  // The number of regions that are completely free.
  uint num_free_regions() const { return _hrm->num_free_regions(); }

  // The number of regions that can be allocated into.
  uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }

  MemoryUsage get_auxiliary_data_memory_usage() const {
    return _hrm->get_auxiliary_data_memory_usage();
  }

  // The number of regions that are not completely free.
  uint num_used_regions() const { return num_regions() - num_free_regions(); }

#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {
    return _hrm->is_free(hr);
  }
#endif // ASSERT

  inline void old_set_add(HeapRegion* hr);
  inline void old_set_remove(HeapRegion* hr);

  inline void archive_set_add(HeapRegion* hr);

  size_t non_young_capacity_bytes() {
    return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
  }

  // Determine whether the given region is one that we are using as an
  // old GC alloc region.
  bool is_old_gc_alloc_region(HeapRegion* hr);

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause);

  // Perform a collection of the heap with the given cause; if the VM operation
  // fails to execute for any reason, retry only if retry_on_gc_failure is set.
  // Returns whether this collection actually executed.
  bool try_collect(GCCause::Cause cause, bool retry_on_gc_failure);

  // True iff an evacuation has failed in the most recent collection.
  bool evacuation_failed() { return _evacuation_failed; }

  void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
  void prepend_to_freelist(FreeRegionList* list);
  void decrement_summary_bytes(size_t bytes);

  virtual bool is_in(const void* p) const;
#ifdef ASSERT
  // Returns whether p is in one of the available areas of the heap. Slow but
  // extensive version.
  bool is_in_exact(const void* p) const;
#endif

  // Return "TRUE" iff the given object address is within the collection
  // set. Assumes that the reference points into the heap.
  inline bool is_in_cset(const HeapRegion *hr);
  inline bool is_in_cset(oop obj);
  inline bool is_in_cset(HeapWord* addr);

  inline bool is_in_cset_or_humongous(const oop obj);

 private:
  // This array is used for a quick test on whether a reference points into
  // the collection set or not. Each of the array's elements denotes whether the
  // corresponding region is in the collection set or not.
  G1HeapRegionAttrBiasedMappedArray _region_attr;

 public:

  inline G1HeapRegionAttr region_attr(const void* obj) const;
  inline G1HeapRegionAttr region_attr(uint idx) const;

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1.
  bool is_in_g1_reserved(const void* p) const {
    return _hrm->reserved().contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // reserved for the heap
  MemRegion g1_reserved() const {
    return _hrm->reserved();
  }

  MemRegion reserved_region() const {
    return _reserved;
  }

  HeapWord* base() const {
    return _reserved.start();
  }

  bool is_in_reserved(const void* addr) const {
    return _reserved.contains(addr);
  }

  G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }

  G1CardTable* card_table() const {
    return _card_table;
  }

  // Iteration functions.

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl);

  virtual void safe_object_iterate(ObjectClosure* cl) {
    object_iterate(cl);
  }

  // Iterate over heap regions, in address order, terminating the
  // iteration early if the "do_heap_region" method returns "true".
  void heap_region_iterate(HeapRegionClosure* blk) const;
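
  // Usage sketch (illustrative; "CountRegions" is a hypothetical closure):
  //
  //   class CountRegions : public HeapRegionClosure {
  //     uint _count;
  //   public:
  //     CountRegions() : _count(0) { }
  //     virtual bool do_heap_region(HeapRegion* r) {
  //       _count++;
  //       return false; // keep iterating
  //     }
  //     uint count() const { return _count; }
  //   };
  //
  //   CountRegions cl;
  //   G1CollectedHeap::heap()->heap_region_iterate(&cl);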

  // Return the region with the given index. It assumes the index is valid.
  inline HeapRegion* region_at(uint index) const;
  inline HeapRegion* region_at_or_null(uint index) const;

  // Return the next region (by index) that is part of the same
  // humongous object that hr is part of.
  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;

  // Calculate the region index of the given address. Given address must be
  // within the heap.
  inline uint addr_to_region(HeapWord* addr) const;

  inline HeapWord* bottom_addr_for_region(uint index) const;

  // Two functions to iterate over the heap regions in parallel. Threads
  // compete using the HeapRegionClaimer to claim the regions before
  // applying the closure on them.
  // The _from_worker_offset version uses the HeapRegionClaimer and
  // the worker id to calculate a start offset to prevent all workers
  // from starting at the same point.
  void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
                                                  HeapRegionClaimer* hrclaimer,
                                                  uint worker_id) const;

  void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
                                          HeapRegionClaimer* hrclaimer) const;
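
  // Worker-side sketch (illustrative; "cl" is some HeapRegionClosure):
  // each worker passes the shared claimer and its own worker id, so that
  // every region is claimed and processed exactly once:
  //
  //   HeapRegionClaimer claimer(workers()->active_workers());
  //   // in worker "worker_id":
  //   heap_region_par_iterate_from_worker_offset(&cl, &claimer, worker_id);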
1195 
1196   // Iterate over all regions currently in the current collection set.
1197   void collection_set_iterate_all(HeapRegionClosure* blk);
1198 
1199   // Iterate over the regions in the current increment of the collection set.
1200   // Starts the iteration so that the start regions of a given worker id over the
1201   // set active_workers are evenly spread across the set of collection set regions
1202   // to be iterated.
1203   // The variant with the HeapRegionClaimer guarantees that the closure will be
1204   // applied to a particular region exactly once.
1205   void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id) {
1206     collection_set_iterate_increment_from(blk, NULL, worker_id);
1207   }
1208   void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id);

  // Returns the HeapRegion that contains addr. addr must not be NULL.
  template <class T>
  inline HeapRegion* heap_region_containing(const T addr) const;

  // Returns the HeapRegion that contains addr, or NULL if that is an uncommitted
  // region. addr must not be NULL.
  template <class T>
  inline HeapRegion* heap_region_containing_or_null(const T addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "block" instead of "object" since some heaps
  // may not pack objects densely; a block may be either an object or a
  // non-object.
  HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a block, and returns "true" iff
  // the block is an object.
  bool block_is_obj(const HeapWord* addr) const;
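
  // Illustrative sketch (an assumption, not part of this class): resolving an
  // interior pointer to the block that contains it, then testing whether that
  // block is a Java object.
  //
  //   HeapWord* start = g1h->block_start(addr);  // start of the enclosing block
  //   if (g1h->block_is_obj(start)) {
  //     oop obj = oop(start);                    // the block is an object
  //     // ... examine obj ...
  //   }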

  // Section on thread-local allocation buffers (TLABs)
  // See CollectedHeap for semantics.

  bool supports_tlab_allocation() const;
  size_t tlab_capacity(Thread* ignored) const;
  size_t tlab_used(Thread* ignored) const;
  size_t max_tlab_size() const;
  size_t unsafe_max_tlab_alloc(Thread* ignored) const;

  inline bool is_in_young(const oop obj);

  // Returns "true" iff the given word_size is "very large".
  static bool is_humongous(size_t word_size) {
    // Note this has to be strictly greater-than as the TLABs
    // are capped at the humongous threshold and we want to
    // ensure that we don't try to allocate a TLAB as
    // humongous and that we don't allocate a humongous
    // object in a TLAB.
    return word_size > _humongous_object_threshold_in_words;
  }

  // Returns the humongous threshold for a specific region size.
  static size_t humongous_threshold_for(size_t region_size) {
    return (region_size / 2);
  }

  // Returns the number of regions a humongous object of the given word size
  // requires.
  static size_t humongous_obj_size_in_regions(size_t word_size);
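
  // Worked example (illustrative assumption): with a 1 MB region size, i.e.
  // 131072 words on a 64-bit VM (8-byte HeapWords), the threshold is
  // humongous_threshold_for(131072) == 65536 words. An allocation of 65537
  // words is therefore humongous, and since humongous objects occupy whole
  // regions, humongous_obj_size_in_regions(65537) == 1: the object claims a
  // single full region.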

  // Return the maximum heap capacity.
  virtual size_t max_capacity() const;

  // Return the size of reserved memory. Returns a different value than
  // max_capacity() when AllocateOldGenAt is used.
  virtual size_t max_reserved_capacity() const;

  virtual jlong millis_since_last_gc();

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static G1CollectedHeap* heap();
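
  // Typical usage (a sketch, assuming G1 is the active collector): callers
  // that know the heap is G1 fetch the checked, downcast singleton rather
  // than casting Universe::heap() by hand.
  //
  //   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  //   uint young = g1h->young_regions_count();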

  void set_region_short_lived_locked(HeapRegion* hr);
  // add appropriate methods for any other surv rate groups

  const G1SurvivorRegions* survivor() const { return &_survivor; }

  uint eden_regions_count() const { return _eden.length(); }
  uint survivor_regions_count() const { return _survivor.length(); }
  size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
  size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
  uint young_regions_count() const { return _eden.length() + _survivor.length(); }
  uint old_regions_count() const { return _old_set.length(); }
  uint archive_regions_count() const { return _archive_set.length(); }
  uint humongous_regions_count() const { return _humongous_set.length(); }

#ifdef ASSERT
  bool check_young_list_empty();
#endif

  // *** Stuff related to concurrent marking.  It's not clear to me that so
  // many of these need to be public.

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.
  // This performs a concurrent marking of the live objects in a
  // bitmap off to the side.
  void do_concurrent_mark();

  bool is_marked_next(oop obj) const;

  // Determine if an object is dead, given the object and also
  // the region to which the object belongs. An object is dead
  // iff a) it was not allocated since the last mark, b) it
  // is not marked, and c) it is not in an archive region.
  bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
    return
      hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) &&
      !hr->is_archive();
  }

  // This function returns true when an object has been
  // around since the previous marking and hasn't yet
  // been marked during this marking, and is not in an archive region.
  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_next_marking(obj) &&
      !is_marked_next(obj) &&
      !hr->is_archive();
  }
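
  // Illustrative sketch (an assumption, not part of this class): a closure
  // that only processes objects the previous marking proved live.
  //
  //   void do_object(oop obj) {
  //     HeapRegion* hr = g1h->heap_region_containing(obj);
  //     if (!g1h->is_obj_dead(obj, hr)) {
  //       // ... obj is live with respect to the previous marking ...
  //     }
  //   }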

  // Determine if an object is dead, given only the object itself.
  // This will find the region to which the object belongs and
  // then call the region version of the same function.
  // If the object is NULL, it is not considered dead.

  inline bool is_obj_dead(const oop obj) const;

  inline bool is_obj_ill(const oop obj) const;

  inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
  inline bool is_obj_dead_full(const oop obj) const;

  G1ConcurrentMark* concurrent_mark() const { return _cm; }

  // Refinement

  G1ConcurrentRefine* concurrent_refine() const { return _cr; }

  // Optimized nmethod scanning support routines

  // Register the given nmethod with the G1 heap.
  virtual void register_nmethod(nmethod* nm);

  // Unregister the given nmethod from the G1 heap.
  virtual void unregister_nmethod(nmethod* nm);

  // No nmethod flushing needed.
  virtual void flush_nmethod(nmethod* nm) {}

  // No nmethod verification implemented.
  virtual void verify_nmethod(nmethod* nm) {}

  // Free up superfluous code root memory.
  void purge_code_root_memory();

  // Rebuild the strong code root lists for each region
  // after a full GC.
  void rebuild_strong_code_roots();

  // Partial cleaning of VM internal data structures.
  void string_dedup_cleaning(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             G1GCPhaseTimes* phase_times = NULL);

  // Performs cleaning of data structures after class unloading.
  void complete_cleaning(BoolObjectClosure* is_alive, bool class_unloading_occurred);

  // Redirty logged cards in the refinement queue.
  void redirty_logged_cards(G1RedirtyCardsQueueSet* rdcqs);

  // Verification

  // Deduplicate the given string.
  virtual void deduplicate_string(oop str);

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify();

  // Perform verification.

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information
  // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseFullMarking, which is to verify the marking during a
  // full GC.
  void verify(VerifyOption vo);
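
  // Usage sketch (assuming the VerifyOption_G1* enum values of this code
  // base): after-GC verification against the consistent "prev" information.
  //
  //   g1h->prepare_for_verify();
  //   g1h->verify(VerifyOption_G1UsePrevMarking);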

  // WhiteBox testing support.
  virtual bool supports_concurrent_phase_control() const;
  virtual bool request_concurrent_phase(const char* phase);
  bool is_heterogeneous_heap() const;

  virtual WorkGang* get_safepoint_workers() { return _workers; }

  // The methods below are here for convenience and dispatch the
  // appropriate method depending on the value of the given VerifyOption
  // parameter. The values for that parameter, and their meanings,
  // are the same as those above.

  bool is_obj_dead_cond(const oop obj,
                        const HeapRegion* hr,
                        const VerifyOption vo) const;

  bool is_obj_dead_cond(const oop obj,
                        const VerifyOption vo) const;

  G1HeapSummary create_g1_heap_summary();
  G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);

  // Printing
private:
  void print_heap_regions() const;
  void print_regions_on(outputStream* st) const;

public:
  virtual void print_on(outputStream* st) const;
  virtual void print_extended_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;

  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;

  // Override
  void print_tracing_info() const;

  // The following two methods are helpful for debugging RSet issues.
  void print_cset_rsets() PRODUCT_RETURN;
  void print_all_rsets() PRODUCT_RETURN;

  // Used to print information about locations in the hs_err file.
  virtual bool print_location(outputStream* st, void* addr) const;

  size_t pending_card_num();
};

class G1ParEvacuateFollowersClosure : public VoidClosure {
private:
  double _start_term;
  double _term_time;
  size_t _term_attempts;

  void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
  void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
protected:
  G1CollectedHeap*              _g1h;
  G1ParScanThreadState*         _par_scan_state;
  RefToScanQueueSet*            _queues;
  ParallelTaskTerminator*       _terminator;
  G1GCPhaseTimes::GCParPhases   _phase;

  G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  RefToScanQueueSet*      queues()         { return _queues; }
  ParallelTaskTerminator* terminator()     { return _terminator; }

public:
  G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
                                G1ParScanThreadState* par_scan_state,
                                RefToScanQueueSet* queues,
                                ParallelTaskTerminator* terminator,
                                G1GCPhaseTimes::GCParPhases phase)
    : _start_term(0.0), _term_time(0.0), _term_attempts(0),
      _g1h(g1h), _par_scan_state(par_scan_state),
      _queues(queues), _terminator(terminator), _phase(phase) {}

  void do_void();

  double term_time() const { return _term_time; }
  size_t term_attempts() const { return _term_attempts; }

private:
  inline bool offer_termination();
};
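
// Usage sketch (an assumption based on how evacuation tasks typically drive
// this closure): each GC worker drains its scan queues until the terminator
// agrees all workers are done, then reports how long it waited in termination.
//
//   G1ParEvacuateFollowersClosure cl(g1h, pss, queues, &terminator,
//                                    G1GCPhaseTimes::ObjCopy);
//   cl.do_void();  // drain queues to completion
//   log_debug(gc)("termination attempts: " SIZE_FORMAT ", time: %.3fms",
//                 cl.term_attempts(), cl.term_time() * 1000.0);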

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_HPP