rev 59939 : [mq]: 8243974-investigate-millis-since-last-gc-move

   1 /*
   2  * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1BiasedArray.hpp"
  30 #include "gc/g1/g1CardTable.hpp"
  31 #include "gc/g1/g1CollectionSet.hpp"
  32 #include "gc/g1/g1CollectorState.hpp"
  33 #include "gc/g1/g1ConcurrentMark.hpp"
  34 #include "gc/g1/g1EdenRegions.hpp"
  35 #include "gc/g1/g1EvacFailure.hpp"
  36 #include "gc/g1/g1EvacStats.hpp"
  37 #include "gc/g1/g1EvacuationInfo.hpp"
  38 #include "gc/g1/g1GCPhaseTimes.hpp"
  39 #include "gc/g1/g1HeapTransition.hpp"
  40 #include "gc/g1/g1HeapVerifier.hpp"
  41 #include "gc/g1/g1HRPrinter.hpp"
  42 #include "gc/g1/g1HeapRegionAttr.hpp"
  43 #include "gc/g1/g1MonitoringSupport.hpp"
  44 #include "gc/g1/g1NUMA.hpp"
  45 #include "gc/g1/g1RedirtyCardsQueue.hpp"
  46 #include "gc/g1/g1SurvivorRegions.hpp"
  47 #include "gc/g1/g1YCTypes.hpp"
  48 #include "gc/g1/heapRegionManager.hpp"
  49 #include "gc/g1/heapRegionSet.hpp"
  50 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
  51 #include "gc/shared/barrierSet.hpp"
  52 #include "gc/shared/collectedHeap.hpp"
  53 #include "gc/shared/gcHeapSummary.hpp"
  54 #include "gc/shared/plab.hpp"
  55 #include "gc/shared/preservedMarks.hpp"
  56 #include "gc/shared/softRefPolicy.hpp"
  57 #include "gc/shared/taskqueue.hpp"
  58 #include "memory/memRegion.hpp"
  59 #include "utilities/stack.hpp"
  60 
  61 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  62 // It uses the "Garbage First" heap organization and algorithm, which
  63 // may combine concurrent marking with parallel, incremental compaction of
  64 // heap subsets that will yield large amounts of garbage.
  65 
  66 // Forward declarations
  67 class HeapRegion;
  68 class GenerationSpec;
  69 class G1ParScanThreadState;
  70 class G1ParScanThreadStateSet;
  72 class MemoryPool;
  73 class MemoryManager;
  74 class ObjectClosure;
  75 class SpaceClosure;
  76 class CompactibleSpaceClosure;
  77 class Space;
  78 class G1CardTableEntryClosure;
  79 class G1CollectionSet;
  80 class G1Policy;
  81 class G1HotCardCache;
  82 class G1RemSet;
  83 class G1YoungRemSetSamplingThread;
  84 class G1ConcurrentMark;
  85 class G1ConcurrentMarkThread;
  86 class G1ConcurrentRefine;
  87 class GenerationCounters;
  88 class STWGCTimer;
  89 class G1NewTracer;
  90 class EvacuationFailedInfo;
  91 class nmethod;
  92 class WorkGang;
  93 class G1Allocator;
  94 class G1ArchiveAllocator;
  95 class G1FullGCScope;
  96 class G1HeapVerifier;
  97 class G1HeapSizingPolicy;
  98 class G1HeapSummary;
  99 class G1EvacSummary;
 100 
 101 typedef OverflowTaskQueue<ScannerTask, mtGC>           G1ScannerTasksQueue;
 102 typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;
 103 
 104 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 105 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
 106 
 107 // The G1 STW is alive closure.
 108 // An instance is embedded into the G1CH and used as the
 109 // (optional) _is_alive_non_header closure in the STW
 110 // reference processor. It is also extensively used during
 111 // reference processing during STW evacuation pauses.
 112 class G1STWIsAliveClosure : public BoolObjectClosure {
 113   G1CollectedHeap* _g1h;
 114 public:
 115   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 116   bool do_object_b(oop p);
 117 };
 118 
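       // Predicate used as the _is_subject_to_discovery closure of the STW
       // reference processor (see _is_subject_to_discovery_stw below); it
       // determines which Reference objects that processor may discover.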
 119 class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
 120   G1CollectedHeap* _g1h;
 121 public:
 122   G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 123   bool do_object_b(oop p);
 124 };
 125 
 126 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 127  private:
 128   void reset_from_card_cache(uint start_idx, size_t num_regions);
 129  public:
 130   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 131 };
 132 
 133 class G1CollectedHeap : public CollectedHeap {
 134   friend class VM_CollectForMetadataAllocation;
 135   friend class VM_G1CollectForAllocation;
 136   friend class VM_G1CollectFull;
 137   friend class VM_G1TryInitiateConcMark;
 138   friend class VMStructs;
 139   friend class MutatorAllocRegion;
 140   friend class G1FullCollector;
 141   friend class G1GCAllocRegion;
 142   friend class G1HeapVerifier;
 143 
 144   // Closures used in implementation.
 145   friend class G1ParScanThreadState;
 146   friend class G1ParScanThreadStateSet;
 147   friend class G1EvacuateRegionsTask;
 148   friend class G1PLABAllocator;
 149 
 150   // Other related classes.
 151   friend class HeapRegionClaimer;
 152 
 153   // Testing classes.
 154   friend class G1CheckRegionAttrTableClosure;
 155 
 156 private:
 157   G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
 158 
 159   WorkGang* _workers;
 160   G1CardTable* _card_table;
 161 
 162   SoftRefPolicy      _soft_ref_policy;
 163 
 164   static size_t _humongous_object_threshold_in_words;
 165 
 166   // Records the end of the most recent collection regardless of type.
 167   Ticks _collection_pause_end;
 168 
 169   // For CollectedHeap::millis_since_last_gc() support. Records end of the last
 170   // collection that visited all objects.
 171   jlong _time_of_last_gc_ns;
 172 
 173   // These sets keep track of old, archive and humongous regions respectively.
 174   HeapRegionSet _old_set;
 175   HeapRegionSet _archive_set;
 176   HeapRegionSet _humongous_set;
 177 
 178   void eagerly_reclaim_humongous_regions();
 179   // Start a new incremental collection set for the next pause.
 180   void start_new_collection_set();
 181 
 182   // The block offset table for the G1 heap.
 183   G1BlockOffsetTable* _bot;
 184 
 185   // Tears down the region sets / lists so that they are empty and the
 186   // regions on the heap do not belong to a region set / list. The
 187   // only exception is the humongous set which we leave unaltered. If
 188   // free_list_only is true, it will only tear down the master free
 189   // list. It is called before a Full GC (free_list_only == false) or
 190   // before heap shrinking (free_list_only == true).
 191   void tear_down_region_sets(bool free_list_only);
 192 
 193   // Rebuilds the region sets / lists so that they are repopulated to
 194   // reflect the contents of the heap. The only exception is the
 195   // humongous set which was not torn down in the first place. If
 196   // free_list_only is true, it will only rebuild the master free
 197   // list. It is called after a Full GC (free_list_only == false) or
 198   // after heap shrinking (free_list_only == true).
 199   void rebuild_region_sets(bool free_list_only);
 200 
 201   // Callback for region mapping changed events.
 202   G1RegionMappingChangedListener _listener;
 203 
 204   // Handle G1 NUMA support.
 205   G1NUMA* _numa;
 206 
 207   // The sequence of all heap regions in the heap.
 208   HeapRegionManager* _hrm;
 209 
 210   // Manages all allocations with regions except humongous object allocations.
 211   G1Allocator* _allocator;
 212 
 213   // Manages all heap verification.
 214   G1HeapVerifier* _verifier;
 215 
 216   // Outside of GC pauses, the number of bytes used in all regions other
 217   // than the current allocation region(s).
 218   volatile size_t _summary_bytes_used;
 219 
 220   void increase_used(size_t bytes);
 221   void decrease_used(size_t bytes);
 222 
 223   void set_used(size_t bytes);
 224 
 225   // Number of bytes used in all regions during GC. Typically changed when
 226   // retiring a GC alloc region.
 227   size_t _bytes_used_during_gc;
 228 
 229   // Class that handles archive allocation ranges.
 230   G1ArchiveAllocator* _archive_allocator;
 231 
 232   // GC allocation statistics policy for survivors.
 233   G1EvacStats _survivor_evac_stats;
 234 
 235   // GC allocation statistics policy for tenured objects.
 236   G1EvacStats _old_evac_stats;
 237 
 238   // It specifies whether we should attempt to expand the heap after a
 239   // region allocation failure. If heap expansion fails we set this to
 240   // false so that we don't re-attempt the heap expansion (it's likely
 241   // that subsequent expansion attempts will also fail if one fails).
 242   // Currently, it is only consulted during GC and it's reset at the
 243   // start of each GC.
 244   bool _expand_heap_after_alloc_failure;
 245 
 246   // Helper for monitoring and management support.
 247   G1MonitoringSupport* _g1mm;
 248 
 249   // Records whether the region at the given index is (still) a
 250   // candidate for eager reclaim.  Only valid for humongous start
 251   // regions; other regions have unspecified values.  Humongous start
 252   // regions are initialized at start of collection pause, with
 253   // candidates removed from the set as they are found reachable from
 254   // roots or the young generation.
 255   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 256    protected:
 257     bool default_value() const { return false; }
 258    public:
 259     void clear() { G1BiasedMappedArray<bool>::clear(); }
 260     void set_candidate(uint region, bool value) {
 261       set_by_index(region, value);
 262     }
 263     bool is_candidate(uint region) {
 264       return get_by_index(region);
 265     }
 266   };
 267 
 268   HumongousReclaimCandidates _humongous_reclaim_candidates;
 269   // Stores whether during humongous object registration we found candidate regions.
 270   // If not, we can skip a few steps.
 271   bool _has_humongous_reclaim_candidates;
 272 
 273   G1HRPrinter _hr_printer;
 274 
 275   // Return true if an explicit GC should start a concurrent cycle instead
 276   // of doing a STW full GC. A concurrent cycle should be started if:
 277   // (a) cause == _g1_humongous_allocation,
 278   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
 279   // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
 280   // (d) cause == _wb_conc_mark or _wb_breakpoint,
 281   // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
 282   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 283 
 284   // Attempt to start a concurrent cycle with the indicated cause.
 285   // precondition: should_do_concurrent_full_gc(cause)
 286   bool try_collect_concurrently(GCCause::Cause cause,
 287                                 uint gc_counter,
 288                                 uint old_marking_started_before);
 289 
 290   // Return true if we should upgrade to a full gc after an incremental one.
 291   bool should_upgrade_to_full_gc(GCCause::Cause cause);
 292 
 293   // indicates whether we are in young or mixed GC mode
 294   // Indicates whether we are in young or mixed GC mode.
 295 
 296   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 297   // concurrent cycles) we have started.
 298   volatile uint _old_marking_cycles_started;
 299 
 300   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 301   // concurrent cycles) we have completed.
 302   volatile uint _old_marking_cycles_completed;
 303 
 304   // This is a non-product method that is helpful for testing. It is
 305   // called at the end of a GC and artificially expands the heap by
 306   // allocating a number of dead regions. This way we can induce very
 307   // frequent marking cycles and stress the cleanup / concurrent
 308   // cleanup code more (as all the regions that will be allocated by
 309   // this method will be found dead by the marking cycle).
 310   void allocate_dummy_regions() PRODUCT_RETURN;
 311 
 312   // If the HR printer is active, dump the state of the regions in the
 313   // heap after a compaction.
 314   void print_hrm_post_compaction();
 315 
 316   // Create a memory mapper for auxiliary data structures of the given size and
 317   // translation factor.
 318   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 319                                                          size_t size,
 320                                                          size_t translation_factor);
 321 
 322   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 323 
 324   // These are macros so that, if the assert fires, we get the correct
 325   // line number, file, etc.
 326 
 327 #define heap_locking_asserts_params(_extra_message_)                          \
 328   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 329   (_extra_message_),                                                          \
 330   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 331   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 332   BOOL_TO_STR(Thread::current()->is_VM_thread())
 333 
 334 #define assert_heap_locked()                                                  \
 335   do {                                                                        \
 336     assert(Heap_lock->owned_by_self(),                                        \
 337            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 338   } while (0)
 339 
 340 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
 341   do {                                                                        \
 342     assert(Heap_lock->owned_by_self() ||                                      \
 343            (SafepointSynchronize::is_at_safepoint() &&                        \
 344              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
 345            heap_locking_asserts_params("should be holding the Heap_lock or "  \
 346                                         "should be at a safepoint"));         \
 347   } while (0)
 348 
 349 #define assert_heap_locked_and_not_at_safepoint()                             \
 350   do {                                                                        \
 351     assert(Heap_lock->owned_by_self() &&                                      \
 352                                     !SafepointSynchronize::is_at_safepoint(), \
 353           heap_locking_asserts_params("should be holding the Heap_lock and "  \
 354                                        "should not be at a safepoint"));      \
 355   } while (0)
 356 
 357 #define assert_heap_not_locked()                                              \
 358   do {                                                                        \
 359     assert(!Heap_lock->owned_by_self(),                                       \
 360         heap_locking_asserts_params("should not be holding the Heap_lock"));  \
 361   } while (0)
 362 
 363 #define assert_heap_not_locked_and_not_at_safepoint()                         \
 364   do {                                                                        \
 365     assert(!Heap_lock->owned_by_self() &&                                     \
 366                                     !SafepointSynchronize::is_at_safepoint(), \
 367       heap_locking_asserts_params("should not be holding the Heap_lock and "  \
 368                                    "should not be at a safepoint"));          \
 369   } while (0)
 370 
 371 #define assert_at_safepoint_on_vm_thread()                                    \
 372   do {                                                                        \
 373     assert_at_safepoint();                                                    \
 374     assert(Thread::current_or_null() != NULL, "no current thread");           \
 375     assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
 376   } while (0)
 377 
 378 #ifdef ASSERT
 379 #define assert_used_and_recalculate_used_equal(g1h)                           \
 380   do {                                                                        \
 381     size_t cur_used_bytes = g1h->used();                                      \
 382     size_t recal_used_bytes = g1h->recalculate_used();                        \
 383     assert(cur_used_bytes == recal_used_bytes, "Used(" SIZE_FORMAT ") is not" \
 384            " same as recalculated used(" SIZE_FORMAT ").",                    \
 385            cur_used_bytes, recal_used_bytes);                                 \
 386   } while (0)
 387 #else
 388 #define assert_used_and_recalculate_used_equal(g1h) do {} while(0)
 389 #endif
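       // For example, the mutator allocation entry points below are documented to
       // run outside a safepoint and without the Heap_lock, so they would use
       // assert_heap_not_locked_and_not_at_safepoint(), while work performed
       // inside an evacuation pause would use assert_at_safepoint_on_vm_thread().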
 390 
 391   const char* young_gc_name() const;
 392 
 393   // The young region list.
 394   G1EdenRegions _eden;
 395   G1SurvivorRegions _survivor;
 396 
 397   STWGCTimer* _gc_timer_stw;
 398 
 399   G1NewTracer* _gc_tracer_stw;
 400 
 401   // The current policy object for the collector.
 402   G1Policy* _policy;
 403   G1HeapSizingPolicy* _heap_sizing_policy;
 404 
 405   G1CollectionSet _collection_set;
 406 
 407   // Try to allocate a single non-humongous HeapRegion sufficient for
 408   // an allocation of the given word_size. If do_expand is true,
 409   // attempt to expand the heap if necessary to satisfy the allocation
 410   // request. 'type' takes the type of region to be allocated. (Use constants
 411   // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
 412   HeapRegion* new_region(size_t word_size,
 413                          HeapRegionType type,
 414                          bool do_expand,
 415                          uint node_index = G1NUMA::AnyNodeIndex);
 416 
 417   // Initialize a contiguous set of free regions of length num_regions
 418   // and starting at first_hr so that they appear as a single
 419   // humongous region.
 420   HeapWord* humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
 421                                                       uint num_regions,
 422                                                       size_t word_size);
 423 
 424   // Attempt to allocate a humongous object of the given size. Return
 425   // NULL if unsuccessful.
 426   HeapWord* humongous_obj_allocate(size_t word_size);
 427 
 428   // The following two methods, allocate_new_tlab() and
 429   // mem_allocate(), are the two main entry points from the runtime
 430   // into the G1's allocation routines. They have the following
 431   // assumptions:
 432   //
 433   // * They should both be called outside safepoints.
 434   //
 435   // * They should both be called without holding the Heap_lock.
 436   //
 437   // * All allocation requests for new TLABs should go to
 438   //   allocate_new_tlab().
 439   //
 440   // * All non-TLAB allocation requests should go to mem_allocate().
 441   //
 442   // * If either call cannot satisfy the allocation request using the
 443   //   current allocating region, they will try to get a new one. If
 444   //   this fails, they will attempt to do an evacuation pause and
 445   //   retry the allocation.
 446   //
 447   // * If all allocation attempts fail, even after trying to schedule
 448   //   an evacuation pause, allocate_new_tlab() will return NULL,
 449   //   whereas mem_allocate() will attempt a heap expansion and/or
 450   //   schedule a Full GC.
 451   //
 452   // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
 453   //   should never be called with word_size being humongous. All
 454   //   humongous allocation requests should go to mem_allocate() which
 455   //   will satisfy them with a special path.
 456 
 457   virtual HeapWord* allocate_new_tlab(size_t min_size,
 458                                       size_t requested_size,
 459                                       size_t* actual_size);
 460 
 461   virtual HeapWord* mem_allocate(size_t word_size,
 462                                  bool*  gc_overhead_limit_was_exceeded);
 463 
 464   // First-level mutator allocation attempt: try to allocate out of
 465   // the mutator alloc region without taking the Heap_lock. This
 466   // should only be used for non-humongous allocations.
 467   inline HeapWord* attempt_allocation(size_t min_word_size,
 468                                       size_t desired_word_size,
 469                                       size_t* actual_word_size);
 470 
 471   // Second-level mutator allocation attempt: take the Heap_lock and
 472   // retry the allocation attempt, potentially scheduling a GC
 473   // pause. This should only be used for non-humongous allocations.
 474   HeapWord* attempt_allocation_slow(size_t word_size);
 475 
 476   // Takes the Heap_lock and attempts a humongous allocation. It can
 477   // potentially schedule a GC pause.
 478   HeapWord* attempt_allocation_humongous(size_t word_size);
 479 
 480   // Allocation attempt that should be called during safepoints (e.g.,
 481   // at the end of a successful GC). expect_null_mutator_alloc_region
 482   // specifies whether the mutator alloc region is expected to be NULL
 483   // or not.
 484   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 485                                             bool expect_null_mutator_alloc_region);
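       // Taken together, a mutator allocation request roughly proceeds as
       // attempt_allocation() (lock-free fast path), then attempt_allocation_slow()
       // (under the Heap_lock, possibly scheduling a GC pause), and finally, from
       // within such a pause, attempt_allocation_at_safepoint().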
 486 
 487   // These methods are the "callbacks" from the G1AllocRegion class.
 488 
 489   // For mutator alloc regions.
 490   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force, uint node_index);
 491   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 492                                    size_t allocated_bytes);
 493 
 494   // For GC alloc regions.
 495   bool has_more_regions(G1HeapRegionAttr dest);
 496   HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
 497   void retire_gc_alloc_region(HeapRegion* alloc_region,
 498                               size_t allocated_bytes, G1HeapRegionAttr dest);
 499 
 500   // - if explicit_gc is true, the GC is for a System.gc() etc.,
 501   //   otherwise it's for a failed allocation.
 502   // - if clear_all_soft_refs is true, all soft references should be
 503   //   cleared during the GC.
 504   // - it returns false if it is unable to do the collection due to the
 505   //   GC locker being active, true otherwise.
 506   bool do_full_collection(bool explicit_gc,
 507                           bool clear_all_soft_refs);
 508 
 509   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
 510   virtual void do_full_collection(bool clear_all_soft_refs);
 511 
 512   // Callback from VM_G1CollectForAllocation operation.
 513   // This function does everything necessary/possible to satisfy a
 514   // failed allocation request (including collection, expansion, etc.)
 515   HeapWord* satisfy_failed_allocation(size_t word_size,
 516                                       bool* succeeded);
 517   // Internal helpers used during full GC to split it up to
 518   // increase readability.
 519   void abort_concurrent_cycle();
 520   void verify_before_full_collection(bool explicit_gc);
 521   void prepare_heap_for_full_collection();
 522   void prepare_heap_for_mutators();
 523   void abort_refinement();
 524   void verify_after_full_collection();
 525   void print_heap_after_full_collection(G1HeapTransition* heap_transition);
 526 
 527   // Helper method for satisfy_failed_allocation()
 528   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
 529                                              bool do_gc,
 530                                              bool clear_all_soft_refs,
 531                                              bool expect_null_mutator_alloc_region,
 532                                              bool* gc_succeeded);
 533 
 534   // Attempt to expand the heap sufficiently
 535   // to support an allocation of the given "word_size".  If
 536   // successful, perform the allocation and return the address of the
 537   // allocated block, or else "NULL".
 538   HeapWord* expand_and_allocate(size_t word_size);
 539 
 540   // Process any reference objects discovered.
 541   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 542 
 543   // During an initial mark pause we may install a pending list head which is
 544   // not otherwise reachable; ensure that it is marked in the bitmap for
 545   // concurrent marking to discover.
 546   void make_pending_list_reachable();
 547 
 548   // Merges the information gathered on a per-thread basis for all worker threads
 549   // during GC into global variables.
 550   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 551 
 552   void verify_numa_regions(const char* desc);
 553 
 554 public:
 555   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 556 
 557   WorkGang* workers() const { return _workers; }
 558 
 559   // Runs the given AbstractGangTask with the current active workers, returning the
 560   // total time taken.
 561   Tickspan run_task(AbstractGangTask* task);
 562 
 563   G1Allocator* allocator() {
 564     return _allocator;
 565   }
 566 
 567   G1HeapVerifier* verifier() {
 568     return _verifier;
 569   }
 570 
 571   G1MonitoringSupport* g1mm() {
 572     assert(_g1mm != NULL, "should have been initialized");
 573     return _g1mm;
 574   }
 575 
 576   void resize_heap_if_necessary();
 577 
 578   G1NUMA* numa() const { return _numa; }
 579 
 580   // Expand the garbage-first heap by at least the given size (in bytes!).
 581   // Returns true if the heap was expanded by the requested amount;
 582   // false otherwise.
 583   // (Rounds up to a HeapRegion boundary.)
 584   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
 585   bool expand_single_region(uint node_index);
 586 
 587   // Returns the PLAB statistics for a given destination.
 588   inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
 589 
 590   // Determines PLAB size for a given destination.
 591   inline size_t desired_plab_sz(G1HeapRegionAttr dest);
 592 
 593   // Do anything common to GC's.
 594   void gc_prologue(bool full);
 595   void gc_epilogue(bool full);
 596 
 597   // Does the given region fulfill remembered set based eager reclaim candidate requirements?
 598   bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
 599 
 600   // Modify the reclaim candidate set and test for presence.
 601   // These are only valid for starts_humongous regions.
 602   inline void set_humongous_reclaim_candidate(uint region, bool value);
 603   inline bool is_humongous_reclaim_candidate(uint region);
 604   inline void set_has_humongous_reclaim_candidate(bool value);
 605 
 606   // Remove from the reclaim candidate set.  Also remove from the
 607   // collection set so that later encounters avoid the slow path.
 608   inline void set_humongous_is_live(oop obj);
 609 
 610   // Register the given region to be part of the collection set.
 611   inline void register_humongous_region_with_region_attr(uint index);
 612 
 613   // We register a region with the fast "in collection set" test. We
 614   // simply set to true the array slot corresponding to this region.
 615   void register_young_region_with_region_attr(HeapRegion* r) {
 616     _region_attr.set_in_young(r->hrm_index());
 617   }
 618   inline void register_region_with_region_attr(HeapRegion* r);
 619   inline void register_old_region_with_region_attr(HeapRegion* r);
 620   inline void register_optional_region_with_region_attr(HeapRegion* r);
 621 
 622   void clear_region_attr(const HeapRegion* hr) {
 623     _region_attr.clear(hr);
 624   }
 625 
 626   void clear_region_attr() {
 627     _region_attr.clear();
 628   }
 629 
 630   // Verify that the G1RegionAttr remset tracking corresponds to actual remset tracking
 631   // for all regions.
 632   void verify_region_attr_remset_update() PRODUCT_RETURN;
 633 
 634   bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
 635 
 636   // This is called at the start of either a concurrent cycle or a Full
 637   // GC to update the number of old marking cycles started.
 638   void increment_old_marking_cycles_started();
 639 
 640   // This is called at the end of either a concurrent cycle or a Full
 641   // GC to update the number of old marking cycles completed. Those two
 642   // can happen in a nested fashion, i.e., we start a concurrent
 643   // cycle, a Full GC happens half-way through it which ends first,
 644   // and then the cycle notices that a Full GC happened and ends
 645   // too. The concurrent parameter is a boolean to help us do a bit
 646   // tighter consistency checking in the method. If concurrent is
 647   // false, the caller is the inner caller in the nesting (i.e., the
 648   // Full GC). If concurrent is true, the caller is the outer caller
 649   // in this nesting (i.e., the concurrent cycle). Further nesting is
 650   // not currently supported. The end of this call also notifies
 651   // the G1OldGCCount_lock in case a Java thread is waiting for a full
 652   // GC to happen (e.g., it called System.gc() with
 653   // +ExplicitGCInvokesConcurrent).
 654   void increment_old_marking_cycles_completed(bool concurrent);
 655 
 656   uint old_marking_cycles_completed() {
 657     return _old_marking_cycles_completed;
 658   }
 659 
 660   G1HRPrinter* hr_printer() { return &_hr_printer; }
 661 
 662   // Allocates a new heap region instance.
 663   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
 664 
 665   // Allocate the highest free region in the reserved heap. This will commit
 666   // regions as necessary.
 667   HeapRegion* alloc_highest_free_region();
 668 
 669   // Frees a region by resetting its metadata and adding it to the free list
 670   // passed as a parameter (this is usually a local list which will be appended
 671   // to the master free list later or NULL if free list management is handled
 672   // in another way).
 673   // Callers must ensure they are the only one calling free on the given region
 674   // at the same time.
 675   void free_region(HeapRegion* hr, FreeRegionList* free_list);
 676 
 677   // It dirties the cards that cover the block so that the post
 678   // write barrier never queues anything when updating objects on this
 679   // block. It is assumed (and in fact we assert) that the block
 680   // belongs to a young region.
 681   inline void dirty_young_block(HeapWord* start, size_t word_size);
 682 
 683   // Frees a humongous region by collapsing it into individual regions
 684   // and calling free_region() for each of them. The freed regions
 685   // will be added to the free list that's passed as a parameter (this
 686   // is usually a local list which will be appended to the master free
 687   // list later).
 688   // The method assumes that only a single thread is ever calling
 689   // this for a particular region at once.
 690   void free_humongous_region(HeapRegion* hr,
 691                              FreeRegionList* free_list);
 692 
 693   // Facility for allocating in 'archive' regions in high heap memory and
 694   // recording the allocated ranges. These should all be called from the
 695   // VM thread at safepoints, without the heap lock held. They can be used
 696   // to create and archive a set of heap regions which can be mapped at the
 697   // same fixed addresses in a subsequent JVM invocation.
 698   void begin_archive_alloc_range(bool open = false);
 699 
 700   // Check if the requested size would be too large for an archive allocation.
 701   bool is_archive_alloc_too_large(size_t word_size);
 702 
 703   // Allocate memory of the requested size from the archive region. This will
 704   // return NULL if the size is too large or if no memory is available. It
 705   // does not trigger a garbage collection.
 706   HeapWord* archive_mem_allocate(size_t word_size);
 707 
 708   // Optionally aligns the end address and returns the allocated ranges in
 709   // an array of MemRegions in order of ascending addresses.
 710   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 711                                size_t end_alignment_in_bytes = 0);
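       // A typical use of this facility is begin_archive_alloc_range(), followed
       // by one or more archive_mem_allocate() calls, followed by
       // end_archive_alloc_range() to collect the resulting MemRegions.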
 712 
 713   // Facility for allocating a fixed range within the heap and marking
 714   // the containing regions as 'archive'. For use at JVM init time, when the
 715   // caller may mmap archived heap data at the specified range(s).
 716   // Verify that the MemRegions specified in the argument array are within the
 717   // reserved heap.
 718   bool check_archive_addresses(MemRegion* range, size_t count);
 719 
 720   // Commit the appropriate G1 regions containing the specified MemRegions
 721   // and mark them as 'archive' regions. The regions in the array must be
 722   // non-overlapping and in order of ascending address.
 723   bool alloc_archive_regions(MemRegion* range, size_t count, bool open);
 724 
 725   // Insert any required filler objects in the G1 regions around the specified
 726   // ranges to make the regions parseable. This must be called after
 727   // alloc_archive_regions, and after class loading has occurred.
 728   void fill_archive_regions(MemRegion* range, size_t count);
 729 
 730   // For each of the specified MemRegions, uncommit the containing G1 regions
 731   // which had been allocated by alloc_archive_regions. This should be called
 732   // rather than fill_archive_regions at JVM init time if the archive file
 733   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 734   void dealloc_archive_regions(MemRegion* range, size_t count);
 735 
 736   oop materialize_archived_object(oop obj);
 737 
 738 private:
 739 
 740   // Shrink the garbage-first heap by at most the given size (in bytes!).
 741   // (Rounds down to a HeapRegion boundary.)
 742   void shrink(size_t expand_bytes);
 743   void shrink_helper(size_t expand_bytes);
 744 
 745   #if TASKQUEUE_STATS
 746   static void print_taskqueue_stats_hdr(outputStream* const st);
 747   void print_taskqueue_stats() const;
 748   void reset_taskqueue_stats();
 749   #endif // TASKQUEUE_STATS
 750 
 751   // Schedule the VM operation that will do an evacuation pause to
 752   // satisfy an allocation request of word_size. *succeeded will
 753   // return whether the VM operation was successful (it did do an
 754   // evacuation pause) or not (another thread beat us to it or the GC
 755   // locker was active). Given that we should not be holding the
 756   // Heap_lock when we enter this method, we will pass the
 757   // gc_count_before (i.e., total_collections()) as a parameter since
 758   // it has to be read while holding the Heap_lock. Currently, both
 759   // methods that call do_collection_pause() release the Heap_lock
 760   // before the call, so it's easy to read gc_count_before just before.
 761   HeapWord* do_collection_pause(size_t         word_size,
 762                                 uint           gc_count_before,
 763                                 bool*          succeeded,
 764                                 GCCause::Cause gc_cause);
 765 
 766   void wait_for_root_region_scanning();
 767 
 768   // Perform an incremental collection at a safepoint, possibly
 769   // followed by a by-policy upgrade to a full collection.  Returns
 770   // false if unable to do the collection due to the GC locker being
 771   // active, true otherwise.
 772   // precondition: at safepoint on VM thread
 773   // precondition: !is_gc_active()
 774   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 775 
 776   // Helper for do_collection_pause_at_safepoint, containing the guts
 777   // of the incremental collection pause, executed by the vm thread.
 778   void do_collection_pause_at_safepoint_helper(double target_pause_time_ms);
 779 
 780   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 781   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 782   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 783 
 784   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 785 
 786   // Actually do the work of evacuating the parts of the collection set.
 787   void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
 788   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 789 private:
 790   // Evacuate the next set of optional regions.
 791   void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
 792 
 793 public:
 794   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 795   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
 796                                     G1RedirtyCardsQueueSet* rdcqs,
 797                                     G1ParScanThreadStateSet* pss);
 798 
 799   void expand_heap_after_young_collection();
 800   // Update object copying statistics.
 801   void record_obj_copy_mem_stats();
 802 
 803   // The hot card cache for remembered set insertion optimization.
 804   G1HotCardCache* _hot_card_cache;
 805 
 806   // The g1 remembered set of the heap.
 807   G1RemSet* _rem_set;
 808 
 809   // After a collection pause, convert the regions in the collection set into free
 810   // regions.
 811   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 812 
 813   // Abandon the current collection set without recording policy
 814   // statistics or updating free lists.
 815   void abandon_collection_set(G1CollectionSet* collection_set);
 816 
 817   // The concurrent marker (and the thread it runs in.)
 818   G1ConcurrentMark* _cm;
 819   G1ConcurrentMarkThread* _cm_thread;
 820 
 821   // The concurrent refiner.
 822   G1ConcurrentRefine* _cr;
 823 
 824   // The parallel task queues
 825   G1ScannerTasksQueueSet *_task_queues;
 826 
 827   // True iff an evacuation has failed in the current collection.
 828   bool _evacuation_failed;
 829 
 830   EvacuationFailedInfo* _evacuation_failed_info_array;
 831 
 832   // Failed evacuations cause some logical from-space objects to have
 833   // forwarding pointers to themselves.  Reset them.
 834   void remove_self_forwarding_pointers(G1RedirtyCardsQueueSet* rdcqs);
 835 
 836   // Restore the objects in the regions in the collection set after an
 837   // evacuation failure.
 838   void restore_after_evac_failure(G1RedirtyCardsQueueSet* rdcqs);
 839 
 840   PreservedMarksSet _preserved_marks_set;
 841 
 842   // Preserve the mark of "obj", if necessary, in preparation for its mark
 843   // word being overwritten with a self-forwarding-pointer.
 844   void preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m);
 845 
 846 #ifndef PRODUCT
 847   // Support for forcing evacuation failures. Analogous to
 848   // PromotionFailureALot for the other collectors.
 849 
 850   // Records whether G1EvacuationFailureALot should be in effect
 851   // for the current GC
 852   bool _evacuation_failure_alot_for_current_gc;
 853 
 854   // Used to record the GC number for interval checking when
 855   // determining whether G1EvacuationFailureALot is in effect
 856   // for the current GC.
 857   size_t _evacuation_failure_alot_gc_number;
 858 
 859   // Count of the number of evacuations between failures.
 860   volatile size_t _evacuation_failure_alot_count;
 861 
 862   // Set whether G1EvacuationFailureALot should be in effect
 863   // for the current GC (based upon the type of GC and which
 864   // command line flags are set).
 865   inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
 866                                                   bool during_initial_mark,
 867                                                   bool mark_or_rebuild_in_progress);
 868 
 869   inline void set_evacuation_failure_alot_for_current_gc();
 870 
 871   // Return true if it's time to cause an evacuation failure.
 872   inline bool evacuation_should_fail();
 873 
 874   // Reset the G1EvacuationFailureALot counters.  Should be called at
 875   // the end of an evacuation pause in which an evacuation failure occurred.
 876   inline void reset_evacuation_should_fail();
 877 #endif // !PRODUCT
 878 
 879   // ("Weak") Reference processing support.
 880   //
 881   // G1 has 2 instances of the reference processor class. One
 882   // (_ref_processor_cm) handles reference object discovery
 883   // and subsequent processing during concurrent marking cycles.
 884   //
 885   // The other (_ref_processor_stw) handles reference object
 886   // discovery and processing during full GCs and incremental
 887   // evacuation pauses.
 888   //
 889   // During an incremental pause, reference discovery will be
 890   // temporarily disabled for _ref_processor_cm and will be
 891   // enabled for _ref_processor_stw. At the end of the evacuation
 892   // pause references discovered by _ref_processor_stw will be
 893   // processed and discovery will be disabled. The previous
 894   // setting for reference object discovery for _ref_processor_cm
 895   // will be re-instated.
 896   //
 897   // At the start of marking:
 898   //  * Discovery by the CM ref processor is verified to be inactive
 899   //    and its discovered lists are empty.
 900   //  * Discovery by the CM ref processor is then enabled.
 901   //
 902   // At the end of marking:
 903   //  * Any references on the CM ref processor's discovered
 904   //    lists are processed (possibly MT).
 905   //
 906   // At the start of full GC we:
 907   //  * Disable discovery by the CM ref processor and
 908   //    empty CM ref processor's discovered lists
 909   //    (without processing any entries).
 910   //  * Verify that the STW ref processor is inactive and its
 911   //    discovered lists are empty.
 912   //  * Temporarily set STW ref processor discovery as single threaded.
 913   //  * Temporarily clear the STW ref processor's _is_alive_non_header
 914   //    field.
 915   //  * Finally enable discovery by the STW ref processor.
 916   //
 917   // The STW ref processor is used to record any discovered
 918   // references during the full GC.
 919   //
 920   // At the end of a full GC we:
 921   //  * Enqueue any reference objects discovered by the STW ref processor
 922   //    that have non-live referents. This has the side-effect of
 923   //    making the STW ref processor inactive by disabling discovery.
 924   //  * Verify that the CM ref processor is still inactive
 925   //    and no references have been placed on its discovered
 926   //    lists (also checked as a precondition during initial marking).
 927 
 928   // The (stw) reference processor...
 929   ReferenceProcessor* _ref_processor_stw;
 930 
 931   // During reference object discovery, the _is_alive_non_header
 932   // closure (if non-null) is applied to the referent object to
 933   // determine whether the referent is live. If so then the
 934   // reference object does not need to be 'discovered' and can
 935   // be treated as a regular oop. This has the benefit of reducing
 936   // the number of 'discovered' reference objects that need to
 937   // be processed.
 938   //
 939   // Instance of the is_alive closure for embedding into the
 940   // STW reference processor as the _is_alive_non_header field.
 941   // Supplying a value for the _is_alive_non_header field is
 942   // optional but doing so prevents unnecessary additions to
 943   // the discovered lists during reference discovery.
 944   G1STWIsAliveClosure _is_alive_closure_stw;
 945 
 946   G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;
 947 
 948   // The (concurrent marking) reference processor...
 949   ReferenceProcessor* _ref_processor_cm;
 950 
 951   // Instance of the concurrent mark is_alive closure for embedding
 952   // into the Concurrent Marking reference processor as the
 953   // _is_alive_non_header field. Supplying a value for the
 954   // _is_alive_non_header field is optional but doing so prevents
 955   // unnecessary additions to the discovered lists during reference
 956   // discovery.
 957   G1CMIsAliveClosure _is_alive_closure_cm;
 958 
 959   G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
 960 public:
 961 
 962   G1ScannerTasksQueue* task_queue(uint i) const;
 963 
 964   uint num_task_queues() const;
 965 
 966   // Create a G1CollectedHeap.
 967   // Must call the initialize method afterwards.
 968   // May not return if something goes wrong.
 969   G1CollectedHeap();
 970 
 971 private:
 972   jint initialize_concurrent_refinement();
 973   jint initialize_young_gen_sampling_thread();
 974 public:
 975   // Initialize the G1CollectedHeap to have the initial and
 976   // maximum sizes and remembered and barrier sets
 977   // specified by the policy object.
 978   jint initialize();
 979 
 980   virtual void stop();
 981   virtual void safepoint_synchronize_begin();
 982   virtual void safepoint_synchronize_end();
 983 
 984   // Does operations required after initialization has been done.
 985   void post_initialize();
 986 
 987   // Initialize weak reference processing.
 988   void ref_processing_init();
 989 
 990   virtual Name kind() const {
 991     return CollectedHeap::G1;
 992   }
 993 
 994   virtual const char* name() const {
 995     return "G1";
 996   }
 997 
 998   const G1CollectorState* collector_state() const { return &_collector_state; }
 999   G1CollectorState* collector_state() { return &_collector_state; }
1000 
1001   // The current policy object for the collector.
1002   G1Policy* policy() const { return _policy; }
1003   // The remembered set.
1004   G1RemSet* rem_set() const { return _rem_set; }
1005 
1006   inline G1GCPhaseTimes* phase_times() const;
1007 
1008   HeapRegionManager* hrm() const { return _hrm; }
1009 
1010   const G1CollectionSet* collection_set() const { return &_collection_set; }
1011   G1CollectionSet* collection_set() { return &_collection_set; }
1012 
1013   virtual SoftRefPolicy* soft_ref_policy();
1014 
1015   virtual void initialize_serviceability();
1016   virtual MemoryUsage memory_usage();
1017   virtual GrowableArray<GCMemoryManager*> memory_managers();
1018   virtual GrowableArray<MemoryPool*> memory_pools();
1019 
1020   // Try to minimize the remembered set.
1021   void scrub_rem_set();
1022 
1023   // Apply the given closure on all cards in the Hot Card Cache, emptying it.
1024   void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id);
1025 
1026   // The shared block offset table array.
1027   G1BlockOffsetTable* bot() const { return _bot; }
1028 
1029   // Reference Processing accessors
1030 
1031   // The STW reference processor....
1032   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1033 
1034   G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }
1035 
1036   // The Concurrent Marking reference processor...
1037   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1038 
1039   size_t unused_committed_regions_in_bytes() const;
1040 
1041   virtual size_t capacity() const;
1042   virtual size_t used() const;
1043   // This should be called when we're not holding the heap lock. The
1044   // result might be a bit inaccurate.
1045   size_t used_unlocked() const;
1046   size_t recalculate_used() const;
1047 
1048   // These virtual functions do the actual allocation.
1049   // Some heaps may offer a contiguous region for shared non-blocking
1050   // allocation, via inlined code (by exporting the address of the top and
1051   // end fields defining the extent of the contiguous allocation region.)
1052   // But G1CollectedHeap doesn't yet support this.
1053 
1054   virtual bool is_maximal_no_gc() const {
1055     return _hrm->available() == 0;
1056   }
1057 
1058   // Returns whether there are any regions left in the heap for allocation.
1059   bool has_regions_left_for_allocation() const {
1060     return !is_maximal_no_gc() || num_free_regions() != 0;
1061   }
1062 
1063   // The current number of regions in the heap.
1064   uint num_regions() const { return _hrm->length(); }
1065 
1066   // The max number of regions in the heap.
1067   uint max_regions() const { return _hrm->max_length(); }
1068 
1069   // Max number of regions that can be committed.
1070   uint max_expandable_regions() const { return _hrm->max_expandable_length(); }
1071 
1072   // The number of regions that are completely free.
1073   uint num_free_regions() const { return _hrm->num_free_regions(); }
1074 
1075   // The number of regions that can be allocated into.
1076   uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1077 
1078   MemoryUsage get_auxiliary_data_memory_usage() const {
1079     return _hrm->get_auxiliary_data_memory_usage();
1080   }
1081 
1082   // The number of regions that are not completely free.
1083   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1084 
1085 #ifdef ASSERT
1086   bool is_on_master_free_list(HeapRegion* hr) {
1087     return _hrm->is_free(hr);
1088   }
1089 #endif // ASSERT
1090 
1091   inline void old_set_add(HeapRegion* hr);
1092   inline void old_set_remove(HeapRegion* hr);
1093 
1094   inline void archive_set_add(HeapRegion* hr);
1095 
1096   size_t non_young_capacity_bytes() {
1097     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1098   }
1099 
1100   // Determine whether the given region is one that we are using as an
1101   // old GC alloc region.
1102   bool is_old_gc_alloc_region(HeapRegion* hr);
1103 
1104   // Perform a collection of the heap; intended for use in implementing
1105   // "System.gc".  This probably implies as full a collection as the
1106   // "CollectedHeap" supports.
1107   virtual void collect(GCCause::Cause cause);
1108 
1109   // Perform a collection of the heap with the given cause.
1110   // Returns whether this collection actually executed.
1111   bool try_collect(GCCause::Cause cause);
1112 
1113   // True iff an evacuation has failed in the most-recent collection.
1114   bool evacuation_failed() { return _evacuation_failed; }
1115 
1116   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
1117   void prepend_to_freelist(FreeRegionList* list);
1118   void decrement_summary_bytes(size_t bytes);
1119 
1120   virtual bool is_in(const void* p) const;
1121 #ifdef ASSERT
1122   // Returns whether p is in one of the available areas of the heap. Slow but
1123   // extensive version.
1124   bool is_in_exact(const void* p) const;
1125 #endif
1126 
1127   // Return "TRUE" iff the given object address is within the collection
1128   // set. Assumes that the reference points into the heap.
1129   inline bool is_in_cset(const HeapRegion *hr);
1130   inline bool is_in_cset(oop obj);
1131   inline bool is_in_cset(HeapWord* addr);
1132 
1133   inline bool is_in_cset_or_humongous(const oop obj);
1134 
1135  private:
1136   // This array is used for a quick test on whether a reference points into
1137   // the collection set or not. Each of the array's elements denotes whether the
1138   // corresponding region is in the collection set or not.
1139   G1HeapRegionAttrBiasedMappedArray _region_attr;
1140 
1141  public:
1142 
1143   inline G1HeapRegionAttr region_attr(const void* obj) const;
1144   inline G1HeapRegionAttr region_attr(uint idx) const;
1145 
1146   // Return "TRUE" iff the given object address is in the reserved
1147   // region of g1.
1148   bool is_in_g1_reserved(const void* p) const {
1149     return _hrm->reserved().contains(p);
1150   }
1151 
1152   // Returns a MemRegion that corresponds to the space that has been
1153   // reserved for the heap.
1154   MemRegion g1_reserved() const {
1155     return _hrm->reserved();
1156   }
1157 
1158   MemRegion reserved_region() const {
1159     return _reserved;
1160   }
1161 
1162   HeapWord* base() const {
1163     return _reserved.start();
1164   }
1165 
1166   bool is_in_reserved(const void* addr) const {
1167     return _reserved.contains(addr);
1168   }
1169 
1170   G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1171 
1172   G1CardTable* card_table() const {
1173     return _card_table;
1174   }
1175 
1176   // Iteration functions.
1177 
1178   // Iterate over all objects, calling "cl.do_object" on each.
1179   virtual void object_iterate(ObjectClosure* cl);
1180 
1181   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1182   virtual void keep_alive(oop obj);
1183 
1184   // Iterate over heap regions, in address order, terminating the
1185   // iteration early if the "do_heap_region" method returns "true".
1186   void heap_region_iterate(HeapRegionClosure* blk) const;
1187 
1188   // Return the region with the given index. It assumes the index is valid.
1189   inline HeapRegion* region_at(uint index) const;
1190   inline HeapRegion* region_at_or_null(uint index) const;
1191 
1192   // Return the next region (by index) that is part of the same
1193   // humongous object that hr is part of.
1194   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1195 
1196   // Calculate the region index of the given address. Given address must be
1197   // within the heap.
1198   inline uint addr_to_region(HeapWord* addr) const;
1199 
1200   inline HeapWord* bottom_addr_for_region(uint index) const;
1201 
1202   // Two functions to iterate over the heap regions in parallel. Threads
1203   // compete using the HeapRegionClaimer to claim the regions before
1204   // applying the closure on them.
1205   // The _from_worker_offset version uses the HeapRegionClaimer and
1206   // the worker id to calculate a start offset to prevent all workers from
1207   // starting at the same point.
1208   void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
1209                                                   HeapRegionClaimer* hrclaimer,
1210                                                   uint worker_id) const;
1211 
1212   void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
1213                                           HeapRegionClaimer* hrclaimer) const;
1214 
1215   // Iterate over all regions in the collection set in parallel.
1216   void collection_set_par_iterate_all(HeapRegionClosure* cl,
1217                                       HeapRegionClaimer* hr_claimer,
1218                                       uint worker_id);
1219 
1220   // Iterate over all regions currently in the current collection set.
1221   void collection_set_iterate_all(HeapRegionClosure* blk);
1222 
1223   // Iterate over the regions in the current increment of the collection set.
1224   // Starts the iteration so that the start regions of the individual worker
1225   // ids (over the set of active_workers) are evenly spread across the
1226   // collection set regions to be iterated.
1227   // The variant with the HeapRegionClaimer guarantees that the closure will be
1228   // applied to a particular region exactly once.
1229   void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id) {
1230     collection_set_iterate_increment_from(blk, NULL, worker_id);
1231   }
1232   void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id);
1233 
1234   // Returns the HeapRegion that contains addr. addr must not be NULL.
1235   template <class T>
1236   inline HeapRegion* heap_region_containing(const T addr) const;
1237 
1238   // Returns the HeapRegion that contains addr, or NULL if that is an uncommitted
1239   // region. addr must not be NULL.
1240   template <class T>
1241   inline HeapRegion* heap_region_containing_or_null(const T addr) const;
1242 
1243   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1244   // each address in the (reserved) heap is a member of exactly
1245   // one block.  The defining characteristic of a block is that it is
1246   // possible to find its size, and thus to progress forward to the next
1247   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1248   // represent Java objects, or they might be free blocks in a
1249   // free-list-based heap (or subheap), as long as the two kinds are
1250   // distinguishable and the size of each is determinable.
1251 
1252   // Returns the address of the start of the "block" that contains the
1253   // address "addr".  We say "blocks" instead of "objects" since some heaps
1254   // may not pack objects densely; a block may either be an object or a
1255   // non-object.
1256   HeapWord* block_start(const void* addr) const;
1257 
1258   // Requires "addr" to be the start of a block, and returns "TRUE" iff
1259   // the block is an object.
1260   bool block_is_obj(const HeapWord* addr) const;
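
       // Example (editorial sketch): resolving an arbitrary in-heap address to the
       // block containing it. "g1h" and "addr" are assumed to be a G1CollectedHeap*
       // and an address within a committed region.
       //
       //   HeapWord* start = g1h->block_start(addr);
       //   if (g1h->block_is_obj(start)) {
       //     oop obj = cast_to_oop(start);   // the block is an object
       //     // ... examine obj ...
       //   }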
1261 
1262   // Section on thread-local allocation buffers (TLABs)
1263   // See CollectedHeap for semantics.
1264 
1265   bool supports_tlab_allocation() const;
1266   size_t tlab_capacity(Thread* ignored) const;
1267   size_t tlab_used(Thread* ignored) const;
1268   size_t max_tlab_size() const;
1269   size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1270 
1271   inline bool is_in_young(const oop obj);
1272 
1273   // Returns "true" iff the given word_size is "very large".
1274   static bool is_humongous(size_t word_size) {
1275     // Note this has to be strictly greater-than as the TLABs
1276     // are capped at the humongous threshold and we want to
1277     // ensure that we don't try to allocate a TLAB as
1278     // humongous and that we don't allocate a humongous
1279     // object in a TLAB.
1280     return word_size > _humongous_object_threshold_in_words;
1281   }
1282 
1283   // Returns the humongous threshold for a specific region size
1284   static size_t humongous_threshold_for(size_t region_size) {
1285     return (region_size / 2);
1286   }
1287 
1288   // Returns the number of regions the humongous object of the given word size
1289   // requires.
1290   static size_t humongous_obj_size_in_regions(size_t word_size);
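
       // Worked example (not from the original source): with a 1 MB region size there
       // are 131072 HeapWords per region (8-byte words), so the humongous threshold is
       // 65536 words (512 KB). A request for 65537 or more words is therefore treated
       // as humongous, and humongous_obj_size_in_regions() reports how many contiguous
       // regions it needs, e.g. a 200000-word object occupies 2 such regions.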
1291 
1292   // Return the maximum heap capacity.
1293   virtual size_t max_capacity() const;
1294 
1295   // Return the size of reserved memory. Returns a different value than max_capacity() when AllocateOldGenAt is used.
1296   virtual size_t max_reserved_capacity() const;
1297 
1298   Ticks last_collection_pause_end() const { return _collection_pause_end; }
1299   virtual jlong millis_since_last_gc();
1300 
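       // Example (editorial sketch, not part of this change): a caller could derive the
       // elapsed time since the last pause from the recorded Ticks value roughly as
       // follows; the use of Tickspan and TimeHelper here is illustrative only.
       //
       //   Tickspan since_last = Ticks::now() - g1h->last_collection_pause_end();
       //   jlong millis = (jlong)TimeHelper::counter_to_millis(since_last.value());
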
1301   // Convenience function to be used in situations where the heap type can be
1302   // asserted to be this type.
1303   static G1CollectedHeap* heap() {
1304     return named_heap<G1CollectedHeap>(CollectedHeap::G1);
1305   }
1306 
1307   void set_region_short_lived_locked(HeapRegion* hr);
1308   // add appropriate methods for any other surv rate groups
1309 
1310   const G1SurvivorRegions* survivor() const { return &_survivor; }
1311 
1312   uint eden_regions_count() const { return _eden.length(); }
1313   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1314   uint survivor_regions_count() const { return _survivor.length(); }
1315   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1316   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1317   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1318   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1319   uint old_regions_count() const { return _old_set.length(); }
1320   uint archive_regions_count() const { return _archive_set.length(); }
1321   uint humongous_regions_count() const { return _humongous_set.length(); }
1322 
1323 #ifdef ASSERT
1324   bool check_young_list_empty();
1325 #endif
1326 
1327   // *** Stuff related to concurrent marking.  It's not clear to me that so
1328   // many of these need to be public.
1329 
1330   // The functions below are helper functions that a subclass of
1331   // "CollectedHeap" can use in the implementation of its virtual
1332   // functions.
1333   // This performs a concurrent marking of the live objects in a
1334   // bitmap off to the side.
1335   void do_concurrent_mark();
1336 
1337   bool is_marked_next(oop obj) const;
1338 
1339   // Determine if an object is dead, given the object and also
1340   // the region to which the object belongs. An object is dead
1341   // iff a) it was not allocated since the last mark, b) it
1342   // is not marked, and c) it is not in an archive region.
1343   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1344     return
1345       hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) &&
1346       !hr->is_archive();
1347   }
1348 
1349   // This function returns true when an object has been
1350   // around since the previous marking and hasn't yet
1351   // been marked during this marking, and is not in an archive region.
1352   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1353     return
1354       !hr->obj_allocated_since_next_marking(obj) &&
1355       !is_marked_next(obj) &&
1356       !hr->is_archive();
1357   }
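
       // Example (sketch, not from the original source): a verification-style check
       // that only walks an object's fields while it is still considered live. "g1h",
       // "obj" and "field_closure" are assumed to be in scope.
       //
       //   HeapRegion* hr = g1h->heap_region_containing(obj);
       //   if (!g1h->is_obj_dead(obj, hr)) {
       //     obj->oop_iterate(&field_closure);   // field_closure: some OopIterateClosure
       //   }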
1358 
1359   // Determine if an object is dead, given only the object itself.
1360   // This will find the region to which the object belongs and
1361   // then call the region version of the same function.
1362 
1363   // Note: if the object is NULL it is not considered dead.
1364 
1365   inline bool is_obj_dead(const oop obj) const;
1366 
1367   inline bool is_obj_ill(const oop obj) const;
1368 
1369   inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
1370   inline bool is_obj_dead_full(const oop obj) const;
1371 
1372   G1ConcurrentMark* concurrent_mark() const { return _cm; }
1373 
1374   // Refinement
1375 
1376   G1ConcurrentRefine* concurrent_refine() const { return _cr; }
1377 
1378   // Optimized nmethod scanning support routines
1379 
1380   // Register the given nmethod with the G1 heap.
1381   virtual void register_nmethod(nmethod* nm);
1382 
1383   // Unregister the given nmethod from the G1 heap.
1384   virtual void unregister_nmethod(nmethod* nm);
1385 
1386   // No nmethod flushing needed.
1387   virtual void flush_nmethod(nmethod* nm) {}
1388 
1389   // No nmethod verification implemented.
1390   virtual void verify_nmethod(nmethod* nm) {}
1391 
1392   // Free up superfluous code root memory.
1393   void purge_code_root_memory();
1394 
1395   // Rebuild the strong code root lists for each region
1396   // after a full GC.
1397   void rebuild_strong_code_roots();
1398 
1399   // Partial cleaning of VM internal data structures.
1400   void string_dedup_cleaning(BoolObjectClosure* is_alive,
1401                              OopClosure* keep_alive,
1402                              G1GCPhaseTimes* phase_times = NULL);
1403 
1404   // Performs cleaning of data structures after class unloading.
1405   void complete_cleaning(BoolObjectClosure* is_alive, bool class_unloading_occurred);
1406 
1407   // Redirty logged cards in the refinement queue.
1408   void redirty_logged_cards(G1RedirtyCardsQueueSet* rdcqs);
1409 
1410   // Verification
1411 
1412   // Deduplicate the string
1413   virtual void deduplicate_string(oop str);
1414 
1415   // Perform any cleanup actions necessary before allowing a verification.
1416   virtual void prepare_for_verify();
1417 
1418   // Perform verification.
1419 
1420   // vo == UsePrevMarking -> use "prev" marking information,
1421   // vo == UseNextMarking -> use "next" marking information
1422   // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
1423   //
1424   // NOTE: Only the "prev" marking information is guaranteed to be
1425   // consistent most of the time, so most calls to this should use
1426   // vo == UsePrevMarking.
1427   // Currently, there is only one case where this is called with
1428   // vo == UseNextMarking, which is to verify the "next" marking
1429   // information at the end of remark.
1430   // Currently there is only one place where this is called with
1431   // vo == UseFullMarking, which is to verify the marking during a
1432   // full GC.
1433   void verify(VerifyOption vo);
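
       // Example (editorial sketch): most verification call sites would use the "prev"
       // marking information, e.g.
       //
       //   if (VerifyBeforeGC) {
       //     g1h->prepare_for_verify();
       //     g1h->verify(VerifyOption_G1UsePrevMarking);
       //   }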
1434 
1435   // WhiteBox testing support.
1436   virtual bool supports_concurrent_gc_breakpoints() const;
1437   bool is_heterogeneous_heap() const;
1438 
1439   virtual WorkGang* get_safepoint_workers() { return _workers; }
1440 
1441   // The methods below are here for convenience and dispatch the
1442   // appropriate method depending on value of the given VerifyOption
1443   // parameter. The values for that parameter, and their meanings,
1444   // are the same as those above.
1445 
1446   bool is_obj_dead_cond(const oop obj,
1447                         const HeapRegion* hr,
1448                         const VerifyOption vo) const;
1449 
1450   bool is_obj_dead_cond(const oop obj,
1451                         const VerifyOption vo) const;
1452 
1453   G1HeapSummary create_g1_heap_summary();
1454   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1455 
1456   // Printing
1457 private:
1458   void print_heap_regions() const;
1459   void print_regions_on(outputStream* st) const;
1460 
1461 public:
1462   virtual void print_on(outputStream* st) const;
1463   virtual void print_extended_on(outputStream* st) const;
1464   virtual void print_on_error(outputStream* st) const;
1465 
1466   virtual void gc_threads_do(ThreadClosure* tc) const;
1467 
1468   // Override
1469   void print_tracing_info() const;
1470 
1471   // The following two methods are helpful for debugging RSet issues.
1472   void print_cset_rsets() PRODUCT_RETURN;
1473   void print_all_rsets() PRODUCT_RETURN;
1474 
1475   // Used to print information about locations in the hs_err file.
1476   virtual bool print_location(outputStream* st, void* addr) const;
1477 };
1478 
1479 class G1ParEvacuateFollowersClosure : public VoidClosure {
1480 private:
1481   double _start_term;
1482   double _term_time;
1483   size_t _term_attempts;
1484 
1485   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1486   void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
1487 protected:
1488   G1CollectedHeap*              _g1h;
1489   G1ParScanThreadState*         _par_scan_state;
1490   G1ScannerTasksQueueSet*       _queues;
1491   TaskTerminator*               _terminator;
1492   G1GCPhaseTimes::GCParPhases   _phase;
1493 
1494   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1495   G1ScannerTasksQueueSet* queues()         { return _queues; }
1496   TaskTerminator*         terminator()     { return _terminator; }
1497 
1498 public:
1499   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1500                                 G1ParScanThreadState* par_scan_state,
1501                                 G1ScannerTasksQueueSet* queues,
1502                                 TaskTerminator* terminator,
1503                                 G1GCPhaseTimes::GCParPhases phase)
1504     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1505       _g1h(g1h), _par_scan_state(par_scan_state),
1506       _queues(queues), _terminator(terminator), _phase(phase) {}
1507 
1508   void do_void();
1509 
1510   double term_time() const { return _term_time; }
1511   size_t term_attempts() const { return _term_attempts; }
1512 
1513 private:
1514   inline bool offer_termination();
1515 };
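
     // Example (editorial sketch, not part of the original header): during evacuation
     // each worker typically drains its task queue through this closure, roughly:
     //
     //   G1ParEvacuateFollowersClosure cl(g1h, pss, queue_set, &terminator,
     //                                    G1GCPhaseTimes::ObjCopy);
     //   cl.do_void();   // trims queues and repeatedly offers termination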
1516 
1517 #endif // SHARE_GC_G1_G1COLLECTEDHEAP_HPP
--- EOF ---