/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_HPP

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1EdenRegions.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1EvacStats.hpp"
#include "gc/g1/g1EvacuationInfo.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HRPrinter.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/g1MemoryNodeManager.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/heapRegionManager.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "memory/memRegion.hpp"
#include "utilities/stack.hpp"

// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

// Forward declarations
class HeapRegion;
class GenerationSpec;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
class MemoryPool;
class MemoryManager;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CardTableEntryClosure;
class G1CollectionSet;
class G1Policy;
class G1HotCardCache;
class G1RemSet;
class G1YoungRemSetSamplingThread;
class G1ConcurrentMark;
class G1ConcurrentMarkThread;
class G1ConcurrentRefine;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class EvacuationFailedInfo;
class nmethod;
class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1FullGCScope;
class G1HeapVerifier;
class G1HeapSizingPolicy;
class G1HeapSummary;
class G1EvacSummary;

typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

// The G1 STW is-alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during
// reference processing in STW evacuation pauses.
class G1STWIsAliveClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  bool do_object_b(oop p);
};

class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  bool do_object_b(oop p);
};

class G1RegionMappingChangedListener : public G1MappingChangedListener {
 private:
  void reset_from_card_cache(uint start_idx, size_t num_regions);
 public:
  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

class G1CollectedHeap : public CollectedHeap {
  friend class G1FreeCollectionSetTask;
  friend class VM_CollectForMetadataAllocation;
  friend class VM_G1CollectForAllocation;
  friend class VM_G1CollectFull;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class G1FullCollector;
  friend class G1GCAllocRegion;
  friend class G1HeapVerifier;

  // Closures used in implementation.
  friend class G1ParScanThreadState;
  friend class G1ParScanThreadStateSet;
  friend class G1EvacuateRegionsTask;
  friend class G1PLABAllocator;

  // Other related classes.
  friend class HeapRegionClaimer;

  // Testing classes.
  friend class G1CheckRegionAttrTableClosure;

private:
  G1YoungRemSetSamplingThread* _young_gen_sampling_thread;

  WorkGang* _workers;
  G1CardTable* _card_table;

  SoftRefPolicy      _soft_ref_policy;

  static size_t _humongous_object_threshold_in_words;

  // These sets keep track of old, archive and humongous regions respectively.
  HeapRegionSet _old_set;
  HeapRegionSet _archive_set;
  HeapRegionSet _humongous_set;

  void eagerly_reclaim_humongous_regions();
  // Start a new incremental collection set for the next pause.
  void start_new_collection_set();

  // The block offset table for the G1 heap.
  G1BlockOffsetTable* _bot;

  // Tears down the region sets / lists so that they are empty and the
  // regions on the heap do not belong to a region set / list. The
  // only exception is the humongous set which we leave unaltered. If
  // free_list_only is true, it will only tear down the master free
  // list. It is called before a Full GC (free_list_only == false) or
  // before heap shrinking (free_list_only == true).
  void tear_down_region_sets(bool free_list_only);

  // Rebuilds the region sets / lists so that they are repopulated to
  // reflect the contents of the heap. The only exception is the
  // humongous set which was not torn down in the first place. If
  // free_list_only is true, it will only rebuild the master free
  // list. It is called after a Full GC (free_list_only == false) or
  // after heap shrinking (free_list_only == true).
  void rebuild_region_sets(bool free_list_only);

  // Callback for region mapping changed events.
  G1RegionMappingChangedListener _listener;

  // Manages single- or multi-node memory.
  G1MemoryNodeManager* _mem_node_mgr;

  // The sequence of all heap regions in the heap.
  HeapRegionManager* _hrm;

  // Manages all allocations within heap regions, except humongous object allocations.
  G1Allocator* _allocator;

  // Manages all heap verification.
  G1HeapVerifier* _verifier;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region(s).
  volatile size_t _summary_bytes_used;

  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);

  void set_used(size_t bytes);

  // Class that handles archive allocation ranges.
  G1ArchiveAllocator* _archive_allocator;

  // GC allocation statistics policy for survivors.
  G1EvacStats _survivor_evac_stats;

  // GC allocation statistics policy for tenured objects.
  G1EvacStats _old_evac_stats;

  // It specifies whether we should attempt to expand the heap after a
  // region allocation failure. If heap expansion fails we set this to
  // false so that we don't re-attempt the heap expansion (it's likely
  // that subsequent expansion attempts will also fail if one fails).
  // Currently, it is only consulted during GC and it's reset at the
  // start of each GC.
  bool _expand_heap_after_alloc_failure;

  // Helper for monitoring and management support.
  G1MonitoringSupport* _g1mm;

  // Records whether the region at the given index is (still) a
  // candidate for eager reclaim.  Only valid for humongous start
  // regions; other regions have unspecified values.  Humongous start
  // regions are initialized at the start of a collection pause, with
  // candidates removed from the set as they are found reachable from
  // roots or the young generation.
  class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
   protected:
    bool default_value() const { return false; }
   public:
    void clear() { G1BiasedMappedArray<bool>::clear(); }
    void set_candidate(uint region, bool value) {
      set_by_index(region, value);
    }
    bool is_candidate(uint region) {
      return get_by_index(region);
    }
  };
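
  // Illustrative use of the candidate set above (a sketch, not an actual
  // call site; region_idx is a hypothetical region index):
  //
  //   _humongous_reclaim_candidates.set_candidate(region_idx, true);
  //   ...
  //   if (_humongous_reclaim_candidates.is_candidate(region_idx)) { ... }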

  HumongousReclaimCandidates _humongous_reclaim_candidates;
  // Stores whether during humongous object registration we found candidate regions.
  // If not, we can skip a few steps.
  bool _has_humongous_reclaim_candidates;

  G1HRPrinter _hr_printer;

  // It decides whether an explicit GC should start a concurrent cycle
  // instead of doing a STW GC. Currently, a concurrent cycle is
  // explicitly started if:
  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
  // (b) cause == _g1_humongous_allocation, or
  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent, or
  // (e) cause == _wb_conc_mark.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);

  // Return true if we should upgrade to a full gc after an incremental one.
  bool should_upgrade_to_full_gc(GCCause::Cause cause);

  // Indicates whether we are in young or mixed GC mode.
  G1CollectorState _collector_state;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have started.
  volatile uint _old_marking_cycles_started;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have completed.
  volatile uint _old_marking_cycles_completed;

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
  // allocating a number of dead regions. This way we can induce very
  // frequent marking cycles and stress the cleanup / concurrent
  // cleanup code more (as all the regions that will be allocated by
  // this method will be found dead by the marking cycle).
  void allocate_dummy_regions() PRODUCT_RETURN;

  // If the HR printer is active, dump the state of the regions in the
  // heap after a compaction.
  void print_hrm_post_compaction();

  // Create a memory mapper for auxiliary data structures of the given size and
  // translation factor.
  static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
                                                         size_t size,
                                                         size_t translation_factor);

  void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // These are macros so that, if the assert fires, we get the correct
  // line number, file, etc.

#define heap_locking_asserts_params(_extra_message_)                          \
  "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
  (_extra_message_),                                                          \
  BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
  BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
  BOOL_TO_STR(Thread::current()->is_VM_thread())

#define assert_heap_locked()                                                  \
  do {                                                                        \
    assert(Heap_lock->owned_by_self(),                                        \
           heap_locking_asserts_params("should be holding the Heap_lock"));   \
  } while (0)

#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() ||                                      \
           (SafepointSynchronize::is_at_safepoint() &&                        \
             ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
           heap_locking_asserts_params("should be holding the Heap_lock or "  \
                                        "should be at a safepoint"));         \
  } while (0)

#define assert_heap_locked_and_not_at_safepoint()                             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() &&                                      \
                                    !SafepointSynchronize::is_at_safepoint(), \
          heap_locking_asserts_params("should be holding the Heap_lock and "  \
                                       "should not be at a safepoint"));      \
  } while (0)

#define assert_heap_not_locked()                                              \
  do {                                                                        \
    assert(!Heap_lock->owned_by_self(),                                       \
        heap_locking_asserts_params("should not be holding the Heap_lock"));  \
  } while (0)

#define assert_heap_not_locked_and_not_at_safepoint()                         \
  do {                                                                        \
    assert(!Heap_lock->owned_by_self() &&                                     \
                                    !SafepointSynchronize::is_at_safepoint(), \
      heap_locking_asserts_params("should not be holding the Heap_lock and "  \
                                   "should not be at a safepoint"));          \
  } while (0)

#define assert_at_safepoint_on_vm_thread()                                    \
  do {                                                                        \
    assert_at_safepoint();                                                    \
    assert(Thread::current_or_null() != NULL, "no current thread");           \
    assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
  } while (0)

#ifdef ASSERT
#define assert_used_and_recalculate_used_equal(g1h)                           \
  do {                                                                        \
    size_t cur_used_bytes = g1h->used();                                      \
    size_t recal_used_bytes = g1h->recalculate_used();                        \
    assert(cur_used_bytes == recal_used_bytes, "Used(" SIZE_FORMAT ") is not" \
           " same as recalculated used(" SIZE_FORMAT ").",                    \
           cur_used_bytes, recal_used_bytes);                                 \
  } while (0)
#else
#define assert_used_and_recalculate_used_equal(g1h) do {} while(0)
#endif
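
// Illustrative use of the locking asserts above (a sketch, not an actual
// call site; the method name is hypothetical). A member function that must
// only run while the caller holds the Heap_lock outside a safepoint would
// typically begin with:
//
//   void G1CollectedHeap::example_locked_operation() {
//     assert_heap_locked_and_not_at_safepoint();
//     ...
//   }
//
// whereas code that must run in the VM thread inside a safepoint would use
// assert_at_safepoint_on_vm_thread() instead.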

  const char* young_gc_name() const;

  // The young region list.
  G1EdenRegions _eden;
  G1SurvivorRegions _survivor;

  STWGCTimer* _gc_timer_stw;

  G1NewTracer* _gc_tracer_stw;

  // The current policy object for the collector.
  G1Policy* _policy;
  G1HeapSizingPolicy* _heap_sizing_policy;

  G1CollectionSet _collection_set;

  // Try to allocate a single non-humongous HeapRegion sufficient for
  // an allocation of the given word_size. If do_expand is true,
  // attempt to expand the heap if necessary to satisfy the allocation
  // request. 'type' takes the type of region to be allocated. (Use constants
  // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
  HeapRegion* new_region(size_t word_size,
                         HeapRegionType type,
                         bool do_expand,
                         uint node_index = G1MemoryNodeManager::AnyNodeIndex);

  // Initialize a contiguous set of free regions of length num_regions
  // and starting at index first so that they appear as a single
  // humongous region.
  HeapWord* humongous_obj_allocate_initialize_regions(uint first,
                                                      uint num_regions,
                                                      size_t word_size);

  // Attempt to allocate a humongous object of the given size. Return
  // NULL if unsuccessful.
  HeapWord* humongous_obj_allocate(size_t word_size);

  // The following two methods, allocate_new_tlab() and
  // mem_allocate(), are the two main entry points from the runtime
  // into the G1's allocation routines. They have the following
  // assumptions:
  //
  // * They should both be called outside safepoints.
  //
  // * They should both be called without holding the Heap_lock.
  //
  // * All allocation requests for new TLABs should go to
  //   allocate_new_tlab().
  //
  // * All non-TLAB allocation requests should go to mem_allocate().
  //
  // * If either call cannot satisfy the allocation request using the
  //   current allocating region, they will try to get a new one. If
  //   this fails, they will attempt to do an evacuation pause and
  //   retry the allocation.
  //
  // * If all allocation attempts fail, even after trying to schedule
  //   an evacuation pause, allocate_new_tlab() will return NULL,
  //   whereas mem_allocate() will attempt a heap expansion and/or
  //   schedule a Full GC.
  //
  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
  //   should never be called with word_size being humongous. All
  //   humongous allocation requests should go to mem_allocate() which
  //   will satisfy them with a special path.

  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);

  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool*  gc_overhead_limit_was_exceeded);
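
  // Illustrative caller-side flow for the two entry points above (a sketch
  // under the stated assumptions, not an actual call site; the variable
  // names and sizes are hypothetical):
  //
  //   size_t actual_size = 0;
  //   HeapWord* buf = heap->allocate_new_tlab(min_size, desired_size, &actual_size);
  //   if (buf == NULL) {
  //     // TLAB refill failed even after an evacuation pause was attempted;
  //     // fall back to a direct (non-TLAB) allocation.
  //     bool gc_overhead_limit_was_exceeded = false;
  //     buf = heap->mem_allocate(word_size, &gc_overhead_limit_was_exceeded);
  //   }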

  // First-level mutator allocation attempt: try to allocate out of
  // the mutator alloc region without taking the Heap_lock. This
  // should only be used for non-humongous allocations.
  inline HeapWord* attempt_allocation(size_t min_word_size,
                                      size_t desired_word_size,
                                      size_t* actual_word_size);

  // Second-level mutator allocation attempt: take the Heap_lock and
  // retry the allocation attempt, potentially scheduling a GC
  // pause. This should only be used for non-humongous allocations.
  HeapWord* attempt_allocation_slow(size_t word_size);

  // Takes the Heap_lock and attempts a humongous allocation. It can
  // potentially schedule a GC pause.
  HeapWord* attempt_allocation_humongous(size_t word_size);

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            bool expect_null_mutator_alloc_region);

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force, uint node_index);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  bool has_more_regions(G1HeapRegionAttr dest);
  HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, G1HeapRegionAttr dest);
  // - if explicit_gc is true, the GC is for a System.gc() etc.,
  //   otherwise it's for a failed allocation.
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC.
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise.
  bool do_full_collection(bool explicit_gc,
                          bool clear_all_soft_refs);

  // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t word_size,
                                      bool* succeeded);
  // Internal helpers used during full GC to split it up to
  // increase readability.
  void abort_concurrent_cycle();
  void verify_before_full_collection(bool explicit_gc);
  void prepare_heap_for_full_collection();
  void prepare_heap_for_mutators();
  void abort_refinement();
  void verify_after_full_collection();
  void print_heap_after_full_collection(G1HeapTransition* heap_transition);

  // Helper method for satisfy_failed_allocation()
  HeapWord* satisfy_failed_allocation_helper(size_t word_size,
                                             bool do_gc,
                                             bool clear_all_soft_refs,
                                             bool expect_null_mutator_alloc_region,
                                             bool* gc_succeeded);

  // Attempts to expand the heap sufficiently to support an allocation
  // of the given word_size. If successful, performs the allocation and
  // returns the address of the allocated block, or else NULL.
  HeapWord* expand_and_allocate(size_t word_size);

  // Process any reference objects discovered.
  void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);

  // If, during an initial mark pause, we may install a pending list head which
  // is not otherwise reachable, ensure that it is marked in the bitmap for
  // concurrent marking to discover.
  void make_pending_list_reachable();

  // Merges the information gathered on a per-thread basis for all worker threads
  // during GC into global variables.
  void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
public:
  G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }

  WorkGang* workers() const { return _workers; }

  // Runs the given AbstractGangTask with the current active workers, returning the
  // total time taken.
  Tickspan run_task(AbstractGangTask* task);

  G1Allocator* allocator() {
    return _allocator;
  }

  G1HeapVerifier* verifier() {
    return _verifier;
  }

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }

  void resize_heap_if_necessary();

  G1MemoryNodeManager* mem_node_mgr() const { return _mem_node_mgr; }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
  bool expand_single_region(uint node_index);

  // Returns the PLAB statistics for a given destination.
  inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);

  // Determines PLAB size for a given destination.
  inline size_t desired_plab_sz(G1HeapRegionAttr dest);

  // Do anything common to GCs.
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  // Does the given region fulfill remembered set based eager reclaim candidate requirements?
  bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;

  // Modify the reclaim candidate set and test for presence.
  // These are only valid for starts_humongous regions.
  inline void set_humongous_reclaim_candidate(uint region, bool value);
  inline bool is_humongous_reclaim_candidate(uint region);

  // Remove from the reclaim candidate set.  Also remove from the
  // collection set so that later encounters avoid the slow path.
  inline void set_humongous_is_live(oop obj);

  // Register the given region to be part of the collection set.
  inline void register_humongous_region_with_region_attr(uint index);
  // Update region attributes table with information about all regions.
  void register_regions_with_region_attr();
  // We register a region with the fast "in collection set" test. We
  // simply set to true the array slot corresponding to this region.
  void register_young_region_with_region_attr(HeapRegion* r) {
    _region_attr.set_in_young(r->hrm_index());
  }
  inline void register_region_with_region_attr(HeapRegion* r);
  inline void register_old_region_with_region_attr(HeapRegion* r);
  inline void register_optional_region_with_region_attr(HeapRegion* r);

  void clear_region_attr(const HeapRegion* hr) {
    _region_attr.clear(hr);
  }

  void clear_region_attr() {
    _region_attr.clear();
  }

  // Verify that the G1RegionAttr remset tracking corresponds to actual remset tracking
  // for all regions.
  void verify_region_attr_remset_update() PRODUCT_RETURN;

  bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);

  // This is called at the start of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles started.
  void increment_old_marking_cycles_started();

  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too. The concurrent parameter is a boolean to help us do a bit
  // tighter consistency checking in the method. If concurrent is
  // false, the caller is the inner caller in the nesting (i.e., the
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the FullGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_old_marking_cycles_completed(bool concurrent);
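
  // Example of the nesting described above: a concurrent cycle starts, a
  // Full GC interrupts it and finishes first, calling
  // increment_old_marking_cycles_completed(false /* concurrent */); the
  // interrupted concurrent cycle then notices the Full GC and ends as well,
  // calling increment_old_marking_cycles_completed(true /* concurrent */).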

  uint old_marking_cycles_completed() {
    return _old_marking_cycles_completed;
  }

  G1HRPrinter* hr_printer() { return &_hr_printer; }

  // Allocates a new heap region instance.
  HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);

  // Allocate the highest free region in the reserved heap. This will commit
  // regions as necessary.
  HeapRegion* alloc_highest_free_region();

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is
  // usually a local list which will be appended to the master free
  // list later). If skip_remset is true, the region's RSet will not be
  // freed up. If skip_hot_card_cache is true, the region's hot card cache
  // will not be freed up. The assumption is that this will be done later.
  // The locked parameter indicates if the caller has already taken
  // care of proper synchronization. This may allow some optimizations.
  void free_region(HeapRegion* hr,
                   FreeRegionList* free_list,
                   bool skip_remset,
                   bool skip_hot_card_cache = false,
                   bool locked = false);

  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);

  // Frees a humongous region by collapsing it into individual regions
  // and calling free_region() for each of them. The freed regions
  // will be added to the free list that's passed as a parameter (this
  // is usually a local list which will be appended to the master free
  // list later).
  // The method assumes that only a single thread is ever calling
  // this for a particular region at once.
  void free_humongous_region(HeapRegion* hr,
                             FreeRegionList* free_list);

  // Facility for allocating in 'archive' regions in high heap memory and
  // recording the allocated ranges. These should all be called from the
  // VM thread at safepoints, without the heap lock held. They can be used
  // to create and archive a set of heap regions which can be mapped at the
  // same fixed addresses in a subsequent JVM invocation.
  void begin_archive_alloc_range(bool open = false);

  // Check if the requested size would be too large for an archive allocation.
  bool is_archive_alloc_too_large(size_t word_size);

  // Allocate memory of the requested size from the archive region. This will
  // return NULL if the size is too large or if no memory is available. It
  // does not trigger a garbage collection.
  HeapWord* archive_mem_allocate(size_t word_size);

  // Optionally aligns the end address and returns the allocated ranges in
  // an array of MemRegions in order of ascending addresses.
  void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
                               size_t end_alignment_in_bytes = 0);
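
  // Illustrative call sequence for the archive allocation facility above
  // (a sketch, not an actual call site; sizes are hypothetical):
  //
  //   begin_archive_alloc_range();
  //   HeapWord* p = archive_mem_allocate(word_size);  // NULL if it does not fit
  //   ... further allocations ...
  //   GrowableArray<MemRegion> ranges(2);
  //   end_archive_alloc_range(&ranges, 0 /* no extra end alignment */);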

  // Facility for allocating a fixed range within the heap and marking
  // the containing regions as 'archive'. For use at JVM init time, when the
  // caller may mmap archived heap data at the specified range(s).
  // Verify that the MemRegions specified in the argument array are within the
  // reserved heap.
  bool check_archive_addresses(MemRegion* range, size_t count);

  // Commit the appropriate G1 regions containing the specified MemRegions
  // and mark them as 'archive' regions. The regions in the array must be
  // non-overlapping and in order of ascending address.
  bool alloc_archive_regions(MemRegion* range, size_t count, bool open);

  // Insert any required filler objects in the G1 regions around the specified
  // ranges to make the regions parseable. This must be called after
  // alloc_archive_regions, and after class loading has occurred.
  void fill_archive_regions(MemRegion* range, size_t count);

  // For each of the specified MemRegions, uncommit the containing G1 regions
  // which had been allocated by alloc_archive_regions. This should be called
  // rather than fill_archive_regions at JVM init time if the archive file
  // mapping failed, with the same non-overlapping and sorted MemRegion array.
  void dealloc_archive_regions(MemRegion* range, size_t count, bool is_open);

  oop materialize_archived_object(oop obj);

private:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a HeapRegion boundary.)
  void shrink(size_t shrink_bytes);
  void shrink_helper(size_t shrink_bytes);

  #if TASKQUEUE_STATS
  static void print_taskqueue_stats_hdr(outputStream* const st);
  void print_taskqueue_stats() const;
  void reset_taskqueue_stats();
  #endif // TASKQUEUE_STATS

  // Schedule the VM operation that will do an evacuation pause to
  // satisfy an allocation request of word_size. *succeeded will
  // return whether the VM operation was successful (it did do an
  // evacuation pause) or not (another thread beat us to it or the GC
  // locker was active). Given that we should not be holding the
  // Heap_lock when we enter this method, we will pass the
  // gc_count_before (i.e., total_collections()) as a parameter since
  // it has to be read while holding the Heap_lock. Currently, both
  // methods that call do_collection_pause() release the Heap_lock
  // before the call, so it's easy to read gc_count_before just before.
  HeapWord* do_collection_pause(size_t         word_size,
                                uint           gc_count_before,
                                bool*          succeeded,
                                GCCause::Cause gc_cause);

  void wait_for_root_region_scanning();

  // The guts of the incremental collection pause, executed by the VM
  // thread. It returns false if it is unable to do the collection due
  // to the GC locker being active, true otherwise.
  bool do_collection_pause_at_safepoint(double target_pause_time_ms);

  G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
  void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
  void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);

  void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);

  // Actually do the work of evacuating the parts of the collection set.
  void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
  void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
private:
  // Evacuate the next set of optional regions.
  void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);

public:
  void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
  void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
                                    G1RedirtyCardsQueueSet* rdcqs,
                                    G1ParScanThreadStateSet* pss);

  void expand_heap_after_young_collection();
  // Update object copying statistics.
  void record_obj_copy_mem_stats();

  // The hot card cache for remembered set insertion optimization.
  G1HotCardCache* _hot_card_cache;

  // The g1 remembered set of the heap.
  G1RemSet* _rem_set;

  // After a collection pause, convert the regions in the collection set into free
  // regions.
  void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);

  // Abandon the current collection set without recording policy
  // statistics or updating free lists.
  void abandon_collection_set(G1CollectionSet* collection_set);

  // The concurrent marker (and the thread it runs in.)
  G1ConcurrentMark* _cm;
  G1ConcurrentMarkThread* _cm_thread;

  // The concurrent refiner.
  G1ConcurrentRefine* _cr;

  // The parallel task queues.
  RefToScanQueueSet* _task_queues;

  // True iff an evacuation has failed in the current collection.
  bool _evacuation_failed;

  EvacuationFailedInfo* _evacuation_failed_info_array;

  // Failed evacuations cause some logical from-space objects to have
  // forwarding pointers to themselves.  Reset them.
  void remove_self_forwarding_pointers(G1RedirtyCardsQueueSet* rdcqs);

  // Restore the objects in the regions in the collection set after an
  // evacuation failure.
  void restore_after_evac_failure(G1RedirtyCardsQueueSet* rdcqs);

  PreservedMarksSet _preserved_marks_set;

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m);

#ifndef PRODUCT
  // Support for forcing evacuation failures. Analogous to
  // PromotionFailureALot for the other collectors.

  // Records whether G1EvacuationFailureALot should be in effect
  // for the current GC.
  bool _evacuation_failure_alot_for_current_gc;

  // Used to record the GC number for interval checking when
  // determining whether G1EvacuationFailureALot is in effect
  // for the current GC.
  size_t _evacuation_failure_alot_gc_number;

  // Count of the number of evacuations between failures.
  volatile size_t _evacuation_failure_alot_count;

  // Set whether G1EvacuationFailureALot should be in effect
  // for the current GC (based upon the type of GC and which
  // command line flags are set).
  inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                  bool during_initial_mark,
                                                  bool mark_or_rebuild_in_progress);

  inline void set_evacuation_failure_alot_for_current_gc();

  // Return true if it's time to cause an evacuation failure.
  inline bool evacuation_should_fail();

  // Reset the G1EvacuationFailureALot counters.  Should be called at
  // the end of an evacuation pause in which an evacuation failure occurred.
  inline void reset_evacuation_should_fail();
#endif // !PRODUCT

  // ("Weak") Reference processing support.
  //
  // G1 has 2 instances of the reference processor class. One
  // (_ref_processor_cm) handles reference object discovery
  // and subsequent processing during concurrent marking cycles.
  //
  // The other (_ref_processor_stw) handles reference object
  // discovery and processing during full GCs and incremental
  // evacuation pauses.
  //
  // During an incremental pause, reference discovery will be
  // temporarily disabled for _ref_processor_cm and will be
  // enabled for _ref_processor_stw. At the end of the evacuation
  // pause references discovered by _ref_processor_stw will be
  // processed and discovery will be disabled. The previous
  // setting for reference object discovery for _ref_processor_cm
  // will be re-instated.
  //
  // At the start of marking:
  //  * Discovery by the CM ref processor is verified to be inactive
  //    and its discovered lists are empty.
  //  * Discovery by the CM ref processor is then enabled.
  //
  // At the end of marking:
  //  * Any references on the CM ref processor's discovered
  //    lists are processed (possibly MT).
  //
  // At the start of full GC we:
  //  * Disable discovery by the CM ref processor and
  //    empty CM ref processor's discovered lists
  //    (without processing any entries).
  //  * Verify that the STW ref processor is inactive and its
  //    discovered lists are empty.
  //  * Temporarily set STW ref processor discovery as single-threaded.
  //  * Temporarily clear the STW ref processor's _is_alive_non_header
  //    field.
  //  * Finally enable discovery by the STW ref processor.
  //
  // The STW ref processor is used to record any discovered
  // references during the full GC.
  //
  // At the end of a full GC we:
  //  * Enqueue any reference objects discovered by the STW ref processor
  //    that have non-live referents. This has the side-effect of
  //    making the STW ref processor inactive by disabling discovery.
  //  * Verify that the CM ref processor is still inactive
  //    and no references have been placed on its discovered
  //    lists (also checked as a precondition during initial marking).

  // The (stw) reference processor...
  ReferenceProcessor* _ref_processor_stw;

  // During reference object discovery, the _is_alive_non_header
  // closure (if non-null) is applied to the referent object to
  // determine whether the referent is live. If so then the
  // reference object does not need to be 'discovered' and can
  // be treated as a regular oop. This has the benefit of reducing
  // the number of 'discovered' reference objects that need to
  // be processed.
  //
  // Instance of the is_alive closure for embedding into the
  // STW reference processor as the _is_alive_non_header field.
  // Supplying a value for the _is_alive_non_header field is
  // optional but doing so prevents unnecessary additions to
  // the discovered lists during reference discovery.
  G1STWIsAliveClosure _is_alive_closure_stw;

  G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;

  // The (concurrent marking) reference processor...
  ReferenceProcessor* _ref_processor_cm;

  // Instance of the concurrent mark is_alive closure for embedding
  // into the Concurrent Marking reference processor as the
  // _is_alive_non_header field. Supplying a value for the
  // _is_alive_non_header field is optional but doing so prevents
  // unnecessary additions to the discovered lists during reference
  // discovery.
  G1CMIsAliveClosure _is_alive_closure_cm;

  G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
public:
  RefToScanQueue* task_queue(uint i) const;

  uint num_task_queues() const;

  // Create a G1CollectedHeap.
  // Must call the initialize method afterwards.
  // May not return if something goes wrong.
  G1CollectedHeap();

private:
  jint initialize_concurrent_refinement();
  jint initialize_young_gen_sampling_thread();
public:
  // Initialize the G1CollectedHeap to have the initial and
  // maximum sizes and remembered and barrier sets
  // specified by the policy object.
  jint initialize();

  virtual void stop();
  virtual void safepoint_synchronize_begin();
  virtual void safepoint_synchronize_end();

  // Performs operations required after initialization has been done.
  void post_initialize();

  // Initialize weak reference processing.
  void ref_processing_init();

  virtual Name kind() const {
    return CollectedHeap::G1;
  }

  virtual const char* name() const {
    return "G1";
  }

  const G1CollectorState* collector_state() const { return &_collector_state; }
  G1CollectorState* collector_state() { return &_collector_state; }

  // The current policy object for the collector.
  G1Policy* policy() const { return _policy; }
  // The remembered set.
  G1RemSet* rem_set() const { return _rem_set; }

  inline G1GCPhaseTimes* phase_times() const;

  HeapRegionManager* hrm() const { return _hrm; }

  const G1CollectionSet* collection_set() const { return &_collection_set; }
  G1CollectionSet* collection_set() { return &_collection_set; }

  virtual SoftRefPolicy* soft_ref_policy();

  virtual void initialize_serviceability();
  virtual MemoryUsage memory_usage();
  virtual GrowableArray<GCMemoryManager*> memory_managers();
  virtual GrowableArray<MemoryPool*> memory_pools();

  // Try to minimize the remembered set.
  void scrub_rem_set();

  // Apply the given closure on all cards in the Hot Card Cache, emptying it.
  void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id);

  // The shared block offset table array.
  G1BlockOffsetTable* bot() const { return _bot; }

  // Reference Processing accessors

  // The STW reference processor....
  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

  G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }

  // The Concurrent Marking reference processor...
  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }

  size_t unused_committed_regions_in_bytes() const;

  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;

  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  virtual bool is_maximal_no_gc() const {
    return _hrm->available() == 0;
  }

  // Returns whether there are any regions left in the heap for allocation.
  bool has_regions_left_for_allocation() const {
    return !is_maximal_no_gc() || num_free_regions() != 0;
  }

  // The current number of regions in the heap.
  uint num_regions() const { return _hrm->length(); }

  // The max number of regions in the heap.
  uint max_regions() const { return _hrm->max_length(); }

  // Max number of regions that can be committed.
  uint max_expandable_regions() const { return _hrm->max_expandable_length(); }

  // The number of regions that are completely free.
  uint num_free_regions() const { return _hrm->num_free_regions(); }

  // The number of regions that can be allocated into.
  uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }

  MemoryUsage get_auxiliary_data_memory_usage() const {
    return _hrm->get_auxiliary_data_memory_usage();
  }

  // The number of regions that are not completely free.
  uint num_used_regions() const { return num_regions() - num_free_regions(); }

#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {
    return _hrm->is_free(hr);
  }
#endif // ASSERT

  inline void old_set_add(HeapRegion* hr);
  inline void old_set_remove(HeapRegion* hr);

  inline void archive_set_add(HeapRegion* hr);

  size_t non_young_capacity_bytes() {
    return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
  }

  // Determine whether the given region is one that we are using as an
  // old GC alloc region.
  bool is_old_gc_alloc_region(HeapRegion* hr);

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause);

  // Perform a collection of the heap with the given cause; if the VM operation
  // fails to execute for any reason, retry only if retry_on_gc_failure is set.
  // Returns whether this collection actually executed.
  bool try_collect(GCCause::Cause cause, bool retry_on_gc_failure);

  // True iff an evacuation has failed in the most-recent collection.
  bool evacuation_failed() { return _evacuation_failed; }

  void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
  void prepend_to_freelist(FreeRegionList* list);
  void decrement_summary_bytes(size_t bytes);

  virtual bool is_in(const void* p) const;
#ifdef ASSERT
  // Returns whether p is in one of the available areas of the heap. Slow but
  // extensive version.
  bool is_in_exact(const void* p) const;
#endif

  // Return "TRUE" iff the given object address is within the collection
  // set. Assumes that the reference points into the heap.
  inline bool is_in_cset(const HeapRegion* hr);
  inline bool is_in_cset(oop obj);
  inline bool is_in_cset(HeapWord* addr);

  inline bool is_in_cset_or_humongous(const oop obj);

 private:
  // This array is used for a quick test on whether a reference points into
  // the collection set or not. Each of the array's elements denotes whether the
  // corresponding region is in the collection set or not.
  G1HeapRegionAttrBiasedMappedArray _region_attr;

 public:

  inline G1HeapRegionAttr region_attr(const void* obj) const;
  inline G1HeapRegionAttr region_attr(uint idx) const;

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1.
  bool is_in_g1_reserved(const void* p) const {
    return _hrm->reserved().contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // reserved for the heap.
  MemRegion g1_reserved() const {
    return _hrm->reserved();
  }

  MemRegion reserved_region() const {
    return _reserved;
  }

  HeapWord* base() const {
    return _reserved.start();
  }

  bool is_in_reserved(const void* addr) const {
    return _reserved.contains(addr);
  }

  G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }

  G1CardTable* card_table() const {
    return _card_table;
  }

  // Iteration functions.

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl);

  virtual void safe_object_iterate(ObjectClosure* cl) {
    object_iterate(cl);
  }

  // Iterate over heap regions, in address order, terminating the
  // iteration early if the "do_heap_region" method returns "true".
  void heap_region_iterate(HeapRegionClosure* blk) const;

  // Return the region with the given index. It assumes the index is valid.
  inline HeapRegion* region_at(uint index) const;
  inline HeapRegion* region_at_or_null(uint index) const;

  // Return the next region (by index) that is part of the same
  // humongous object that hr is part of.
  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;

  // Calculate the region index of the given address. Given address must be
  // within the heap.
  inline uint addr_to_region(HeapWord* addr) const;

  inline HeapWord* bottom_addr_for_region(uint index) const;

  // Two functions to iterate over the heap regions in parallel. Threads
  // compete using the HeapRegionClaimer to claim the regions before
  // applying the closure on them.
  // The _from_worker_offset version uses the HeapRegionClaimer and
  // the worker id to calculate a start offset to prevent all workers from
  // starting at the same point.
  void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
                                                  HeapRegionClaimer* hrclaimer,
                                                  uint worker_id) const;

  void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
                                          HeapRegionClaimer* hrclaimer) const;
1195 
1196   // Iterate over all regions currently in the current collection set.
1197   void collection_set_iterate_all(HeapRegionClosure* blk);
1198 
1199   // Iterate over the regions in the current increment of the collection set.
1200   // Starts the iteration so that the start regions of a given worker id over the
1201   // set active_workers are evenly spread across the set of collection set regions
1202   // to be iterated.
1203   // The variant with the HeapRegionClaimer guarantees that the closure will be
1204   // applied to a particular region exactly once.
1205   void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id) {
1206     collection_set_iterate_increment_from(blk, NULL, worker_id);
1207   }
1208   void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id);
1209 
1210   // Returns the HeapRegion that contains addr. addr must not be NULL.
1211   template <class T>
1212   inline HeapRegion* heap_region_containing(const T addr) const;
1213 
1214   // Returns the HeapRegion that contains addr, or NULL if that is an uncommitted
1215   // region. addr must not be NULL.
1216   template <class T>
1217   inline HeapRegion* heap_region_containing_or_null(const T addr) const;
1218 
1219   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1220   // each address in the (reserved) heap is a member of exactly
1221   // one block.  The defining characteristic of a block is that it is
1222   // possible to find its size, and thus to progress forward to the next
1223   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1224   // represent Java objects, or they might be free blocks in a
1225   // free-list-based heap (or subheap), as long as the two kinds are
1226   // distinguishable and the size of each is determinable.
1227 
  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a block may either be an object or a
  // non-object.
1232   HeapWord* block_start(const void* addr) const;
1233 
1234   // Requires "addr" to be the start of a block, and returns "TRUE" iff
1235   // the block is an object.
1236   bool block_is_obj(const HeapWord* addr) const;
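
  // Illustrative sketch: resolving an arbitrary in-heap address to the
  // start of the block covering it, then checking whether that block is
  // a Java object.
  //
  //   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  //   HeapWord* start = g1h->block_start(addr);
  //   if (g1h->block_is_obj(start)) {
  //     oop obj = oop(start);  // the block is a Java object
  //   }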
1237 
1238   // Section on thread-local allocation buffers (TLABs)
1239   // See CollectedHeap for semantics.
1240 
1241   bool supports_tlab_allocation() const;
1242   size_t tlab_capacity(Thread* ignored) const;
1243   size_t tlab_used(Thread* ignored) const;
1244   size_t max_tlab_size() const;
1245   size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1246 
  // Returns "true" iff the given object resides in a young (eden or
  // survivor) region.
  inline bool is_in_young(const oop obj);
1248 
1249   // Returns "true" iff the given word_size is "very large".
1250   static bool is_humongous(size_t word_size) {
1251     // Note this has to be strictly greater-than as the TLABs
1252     // are capped at the humongous threshold and we want to
1253     // ensure that we don't try to allocate a TLAB as
1254     // humongous and that we don't allocate a humongous
1255     // object in a TLAB.
1256     return word_size > _humongous_object_threshold_in_words;
1257   }
1258 
  // Returns the humongous threshold for a specific region size.
1260   static size_t humongous_threshold_for(size_t region_size) {
1261     return (region_size / 2);
1262   }
1263 
1264   // Returns the number of regions the humongous object of the given word size
1265   // requires.
1266   static size_t humongous_obj_size_in_regions(size_t word_size);
1267 
  // Return the maximum heap capacity.
1269   virtual size_t max_capacity() const;
1270 
  // Return the size of reserved memory. Returns a different value than
  // max_capacity() when AllocateOldGenAt is used.
1272   virtual size_t max_reserved_capacity() const;
1273 
  virtual jlong millis_since_last_gc();

1277   // Convenience function to be used in situations where the heap type can be
1278   // asserted to be this type.
1279   static G1CollectedHeap* heap();
1280 
1281   void set_region_short_lived_locked(HeapRegion* hr);
  // TODO: add appropriate methods for any other survivor rate groups.
1283 
1284   const G1SurvivorRegions* survivor() const { return &_survivor; }
1285 
1286   uint eden_regions_count() const { return _eden.length(); }
1287   uint survivor_regions_count() const { return _survivor.length(); }
1288   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1289   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1290   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1291   uint old_regions_count() const { return _old_set.length(); }
1292   uint archive_regions_count() const { return _archive_set.length(); }
1293   uint humongous_regions_count() const { return _humongous_set.length(); }
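
  // Illustrative sketch: the young generation is exactly the union of
  // the eden and survivor sets, so the counters above satisfy:
  //
  //   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  //   assert(g1h->young_regions_count() ==
  //          g1h->eden_regions_count() + g1h->survivor_regions_count(),
  //          "young must be eden plus survivor");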
1294 
1295 #ifdef ASSERT
1296   bool check_young_list_empty();
1297 #endif
1298 
  // *** Functions related to concurrent marking. It is not clear that so
  // many of these need to be public.
1301 
1302   // The functions below are helper functions that a subclass of
1303   // "CollectedHeap" can use in the implementation of its virtual
1304   // functions.
  // This performs a concurrent marking of the live objects in a
  // bitmap that is maintained off to the side.
1307   void do_concurrent_mark();
1308 
  // Returns "true" iff the given object is marked in the "next" marking bitmap.
  bool is_marked_next(oop obj) const;
1310 
1311   // Determine if an object is dead, given the object and also
1312   // the region to which the object belongs. An object is dead
1313   // iff a) it was not allocated since the last mark, b) it
1314   // is not marked, and c) it is not in an archive region.
1315   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1316     return
1317       hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) &&
1318       !hr->is_archive();
1319   }
1320 
1321   // This function returns true when an object has been
1322   // around since the previous marking and hasn't yet
1323   // been marked during this marking, and is not in an archive region.
1324   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1325     return
1326       !hr->obj_allocated_since_next_marking(obj) &&
1327       !is_marked_next(obj) &&
1328       !hr->is_archive();
1329   }
1330 
  // Determine if an object is dead, given only the object itself.
  // This will find the region to which the object belongs and
  // then call the region version of the same function (see the
  // usage sketch below the declarations).
  // A NULL object is never considered dead.

1337   inline bool is_obj_dead(const oop obj) const;
1338 
1339   inline bool is_obj_ill(const oop obj) const;
1340 
1341   inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
1342   inline bool is_obj_dead_full(const oop obj) const;
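
  // Illustrative usage sketch for the region-less overloads above:
  //
  //   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  //   if (!g1h->is_obj_dead(obj)) {
  //     // obj is live w.r.t. the previous marking (or newly allocated)
  //   }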
1343 
1344   G1ConcurrentMark* concurrent_mark() const { return _cm; }
1345 
1346   // Refinement
1347 
1348   G1ConcurrentRefine* concurrent_refine() const { return _cr; }
1349 
1350   // Optimized nmethod scanning support routines
1351 
1352   // Register the given nmethod with the G1 heap.
1353   virtual void register_nmethod(nmethod* nm);
1354 
1355   // Unregister the given nmethod from the G1 heap.
1356   virtual void unregister_nmethod(nmethod* nm);
1357 
1358   // No nmethod flushing needed.
1359   virtual void flush_nmethod(nmethod* nm) {}
1360 
1361   // No nmethod verification implemented.
1362   virtual void verify_nmethod(nmethod* nm) {}
1363 
1364   // Free up superfluous code root memory.
1365   void purge_code_root_memory();
1366 
1367   // Rebuild the strong code root lists for each region
1368   // after a full GC.
1369   void rebuild_strong_code_roots();
1370 
1371   // Partial cleaning of VM internal data structures.
1372   void string_dedup_cleaning(BoolObjectClosure* is_alive,
1373                              OopClosure* keep_alive,
1374                              G1GCPhaseTimes* phase_times = NULL);
1375 
1376   // Performs cleaning of data structures after class unloading.
1377   void complete_cleaning(BoolObjectClosure* is_alive, bool class_unloading_occurred);
1378 
1379   // Redirty logged cards in the refinement queue.
1380   void redirty_logged_cards(G1RedirtyCardsQueueSet* rdcqs);
1381 
1382   // Verification
1383 
  // Deduplicate the given string.
1385   virtual void deduplicate_string(oop str);
1386 
1387   // Perform any cleanup actions necessary before allowing a verification.
1388   virtual void prepare_for_verify();
1389 
1390   // Perform verification.
1391 
  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information,
  // vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
1395   //
1396   // NOTE: Only the "prev" marking information is guaranteed to be
1397   // consistent most of the time, so most calls to this should use
1398   // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Similarly, there is only one place where this is called with
  // vo == UseFullMarking, which is to verify the marking during a
  // full GC.
1405   void verify(VerifyOption vo);
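
  // Illustrative sketch (VerifyOption value names assumed): most callers
  // should verify against the consistent "prev" marking information.
  //
  //   G1CollectedHeap::heap()->verify(VerifyOption_G1UsePrevMarking);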
1406 
1407   // WhiteBox testing support.
1408   virtual bool supports_concurrent_phase_control() const;
1409   virtual bool request_concurrent_phase(const char* phase);
1410   bool is_heterogeneous_heap() const;
1411 
1412   virtual WorkGang* get_safepoint_workers() { return _workers; }
1413 
  // The methods below are here for convenience and dispatch the
  // appropriate method depending on the value of the given VerifyOption
  // parameter. The values for that parameter, and their meanings,
  // are the same as those above.
1418 
1419   bool is_obj_dead_cond(const oop obj,
1420                         const HeapRegion* hr,
1421                         const VerifyOption vo) const;
1422 
1423   bool is_obj_dead_cond(const oop obj,
1424                         const VerifyOption vo) const;
1425 
1426   G1HeapSummary create_g1_heap_summary();
1427   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1428 
1429   // Printing
1430 private:
1431   void print_heap_regions() const;
1432   void print_regions_on(outputStream* st) const;
1433 
1434 public:
1435   virtual void print_on(outputStream* st) const;
1436   virtual void print_extended_on(outputStream* st) const;
1437   virtual void print_on_error(outputStream* st) const;
1438 
1439   virtual void print_gc_threads_on(outputStream* st) const;
1440   virtual void gc_threads_do(ThreadClosure* tc) const;
1441 
1442   // Override
1443   void print_tracing_info() const;
1444 
1445   // The following two methods are helpful for debugging RSet issues.
1446   void print_cset_rsets() PRODUCT_RETURN;
1447   void print_all_rsets() PRODUCT_RETURN;
1448 
1449   // Used to print information about locations in the hs_err file.
1450   virtual bool print_location(outputStream* st, void* addr) const;
1451 
  // Returns the number of buffered dirty cards pending processing.
  size_t pending_card_num();
1453 };
1454 
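// Closure applied by GC worker threads to drain their scan queues during
// evacuation. It records the time spent in, and the number of attempts at,
// the parallel termination protocol (see term_time() and term_attempts()).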
1455 class G1ParEvacuateFollowersClosure : public VoidClosure {
1456 private:
1457   double _start_term;
1458   double _term_time;
1459   size_t _term_attempts;
1460 
1461   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1462   void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
1463 protected:
1464   G1CollectedHeap*              _g1h;
1465   G1ParScanThreadState*         _par_scan_state;
1466   RefToScanQueueSet*            _queues;
1467   ParallelTaskTerminator*       _terminator;
1468   G1GCPhaseTimes::GCParPhases   _phase;
1469 
1470   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1471   RefToScanQueueSet*      queues()         { return _queues; }
1472   ParallelTaskTerminator* terminator()     { return _terminator; }
1473 
1474 public:
1475   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1476                                 G1ParScanThreadState* par_scan_state,
1477                                 RefToScanQueueSet* queues,
1478                                 ParallelTaskTerminator* terminator,
1479                                 G1GCPhaseTimes::GCParPhases phase)
1480     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1481       _g1h(g1h), _par_scan_state(par_scan_state),
1482       _queues(queues), _terminator(terminator), _phase(phase) {}
1483 
1484   void do_void();
1485 
1486   double term_time() const { return _term_time; }
1487   size_t term_attempts() const { return _term_attempts; }
1488 
1489 private:
1490   inline bool offer_termination();
1491 };
1492 
1493 #endif // SHARE_GC_G1_G1COLLECTEDHEAP_HPP