src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 4801 : imported patch code-movement
rev 4802 : imported patch optimize-nmethod-scanning
   1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  27 
  28 #include "gc_implementation/g1/concurrentMark.hpp"
  29 #include "gc_implementation/g1/g1AllocRegion.hpp"
  30 #include "gc_implementation/g1/g1HRPrinter.hpp"
  31 #include "gc_implementation/g1/g1RemSet.hpp"
  32 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
  33 #include "gc_implementation/g1/heapRegionSeq.hpp"
  34 #include "gc_implementation/g1/heapRegionSets.hpp"
  35 #include "gc_implementation/shared/hSpaceCounters.hpp"
  36 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  37 #include "memory/barrierSet.hpp"
  38 #include "memory/memRegion.hpp"
  39 #include "memory/sharedHeap.hpp"
  40 #include "utilities/stack.hpp"
  41 
  42 // A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
  43 // It uses the "Garbage First" heap organization and algorithm, which
  44 // may combine concurrent marking with parallel, incremental compaction of
  45 // heap subsets that will yield large amounts of garbage.
  46 

  47 class HeapRegion;
  48 class HRRSCleanupTask;
  49 class GenerationSpec;
  50 class OopsInHeapRegionClosure;
  51 class G1KlassScanClosure;
  52 class G1ScanHeapEvacClosure;
  53 class ObjectClosure;
  54 class SpaceClosure;
  55 class CompactibleSpaceClosure;
  56 class Space;
  57 class G1CollectorPolicy;
  58 class GenRemSet;
  59 class G1RemSet;
  60 class HeapRegionRemSetIterator;
  61 class ConcurrentMark;
  62 class ConcurrentMarkThread;
  63 class ConcurrentG1Refine;
  64 class GenerationCounters;

  65 
  66 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
  67 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
  68 
  69 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
  70 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  71 
  72 enum GCAllocPurpose {
  73   GCAllocForTenured,
  74   GCAllocForSurvived,
  75   GCAllocPurposeCount
  76 };
  77 
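
GCAllocPurpose selects the destination of an object copied during evacuation. A minimal sketch, assuming a hypothetical per-thread array of GC allocation buffers indexed by purpose (the helper name and array are illustrative, not declared in this file):

    // Sketch only: pick a GC allocation buffer by purpose during evacuation.
    // 'bufs' is an assumed per-thread array indexed by GCAllocPurpose.
    inline HeapWord* allocate_for_purpose(G1ParGCAllocBuffer* bufs[GCAllocPurposeCount],
                                          GCAllocPurpose purpose,
                                          size_t word_size) {
      assert(purpose < GCAllocPurposeCount, "invalid GCAllocPurpose");
      return bufs[purpose]->allocate(word_size);  // NULL if the buffer is full
    }
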
  78 class YoungList : public CHeapObj<mtGC> {
  79 private:
  80   G1CollectedHeap* _g1h;
  81 
  82   HeapRegion* _head;
  83 
  84   HeapRegion* _survivor_head;


 139 
 140   HeapRegion* first_region() { return _head; }
 141   HeapRegion* first_survivor_region() { return _survivor_head; }
 142   HeapRegion* last_survivor_region() { return _survivor_tail; }
 143 
 144   // debugging
 145   bool          check_list_well_formed();
 146   bool          check_list_empty(bool check_sample = true);
 147   void          print();
 148 };
 149 
 150 class MutatorAllocRegion : public G1AllocRegion {
 151 protected:
 152   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 153   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 154 public:
 155   MutatorAllocRegion()
 156     : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
 157 };
 158 
 159 // The G1 STW is-alive closure.
 160 // An instance is embedded into the G1CH and used as the
 161 // (optional) _is_alive_non_header closure in the STW
 162 // reference processor. It is also extensively used during
 163 // reference processing during STW evacuation pauses.
 164 class G1STWIsAliveClosure: public BoolObjectClosure {
 165   G1CollectedHeap* _g1;
 166 public:
 167   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 168   bool do_object_b(oop p);
 169 };
 170 
 171 class SurvivorGCAllocRegion : public G1AllocRegion {
 172 protected:
 173   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 174   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 175 public:
 176   SurvivorGCAllocRegion()
 177   : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
 178 };
 179 
 180 class OldGCAllocRegion : public G1AllocRegion {
 181 protected:
 182   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 183   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 184 public:
 185   OldGCAllocRegion()
 186   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 187 };
 188 
 189 class RefineCardTableEntryClosure;
 190 
 191 class G1CollectedHeap : public SharedHeap {
 192   friend class VM_G1CollectForAllocation;
 193   friend class VM_G1CollectFull;
 194   friend class VM_G1IncCollectionPause;
 195   friend class VMStructs;
 196   friend class MutatorAllocRegion;
 197   friend class SurvivorGCAllocRegion;
 198   friend class OldGCAllocRegion;
 199 
 200   // Closures used in implementation.
 201   template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
 202   friend class G1ParCopyClosure;
 203   friend class G1IsAliveClosure;
 204   friend class G1EvacuateFollowersClosure;
 205   friend class G1ParScanThreadState;
 206   friend class G1ParScanClosureSuper;
 207   friend class G1ParEvacuateFollowersClosure;
 208   friend class G1ParTask;


1515   // that this is always a safe operation, since it doesn't clear any
1516   // bits.
1517   void markModUnionRange(MemRegion mr);
1518 
1519   // Records the fact that a marking phase is no longer in progress.
1520   void set_marking_complete() {
1521     _mark_in_progress = false;
1522   }
1523   void set_marking_started() {
1524     _mark_in_progress = true;
1525   }
1526   bool mark_in_progress() {
1527     return _mark_in_progress;
1528   }
1529 
1530   // Return the maximum heap capacity.
1531   virtual size_t max_capacity() const;
1532 
1533   virtual jlong millis_since_last_gc();
1534 
1535   // Perform any cleanup actions necessary before allowing a verification.
1536   virtual void prepare_for_verify();
1537 
1538   // Perform verification.
1539 
1540   // vo == UsePrevMarking  -> use "prev" marking information,
1541   // vo == UseNextMarking -> use "next" marking information
1542   // vo == UseMarkWord    -> use the mark word in the object header
1543   //
1544   // NOTE: Only the "prev" marking information is guaranteed to be
1545   // consistent most of the time, so most calls to this should use
1546   // vo == UsePrevMarking.
1547   // Currently, there is only one case where this is called with
1548   // vo == UseNextMarking, which is to verify the "next" marking
1549   // information at the end of remark.
1550   // Currently there is only one place where this is called with
1551   // vo == UseMarkWord, which is to verify the marking during a
1552   // full GC.
1553   void verify(bool silent, VerifyOption vo);
1554 
1555   // Override; it uses the "prev" marking information
1556   virtual void verify(bool silent);
1557   virtual void print_on(outputStream* st) const;
1558   virtual void print_extended_on(outputStream* st) const;
1559   virtual void print_on_error(outputStream* st) const;
1560 
1561   virtual void print_gc_threads_on(outputStream* st) const;
1562   virtual void gc_threads_do(ThreadClosure* tc) const;
1563 
1564   // Override
1565   void print_tracing_info() const;
1566 
1567   // The following two methods are helpful for debugging RSet issues.
1568   void print_cset_rsets() PRODUCT_RETURN;
1569   void print_all_rsets() PRODUCT_RETURN;
1570 
1571   // Convenience function to be used in situations where the heap type can be
1572   // asserted to be this type.
1573   static G1CollectedHeap* heap();
1574 
1575   void set_region_short_lived_locked(HeapRegion* hr);
1576   // add appropriate methods for any other surv rate groups
1577 
1578   YoungList* young_list() { return _young_list; }
1579 
1580   // debugging
1581   bool check_young_list_well_formed() {
1582     return _young_list->check_list_well_formed();
1583   }
1584 
1585   bool check_young_list_empty(bool check_heap,
1586                               bool check_sample = true);
1587 
1588   // *** Stuff related to concurrent marking.  It's not clear to me that so
1589   // many of these need to be public.
1590 


1626   // Additionally, a NULL object is not considered dead.
1627 
1628   bool is_obj_dead(const oop obj) const {
1629     const HeapRegion* hr = heap_region_containing(obj);
1630     if (hr == NULL) {
1631       if (obj == NULL) return false;
1632       else return true;
1633     }
1634     else return is_obj_dead(obj, hr);
1635   }
1636 
1637   bool is_obj_ill(const oop obj) const {
1638     const HeapRegion* hr = heap_region_containing(obj);
1639     if (hr == NULL) {
1640       if (obj == NULL) return false;
1641       else return true;
1642     }
1643     else return is_obj_ill(obj, hr);
1644   }
1645 
1646   // The methods below are here for convenience and dispatch the
1647   // appropriate method depending on the value of the given VerifyOption
1648   // parameter. The options for that parameter are:
1649   //
1650   // vo == UsePrevMarking -> use "prev" marking information,
1651   // vo == UseNextMarking -> use "next" marking information,
1652   // vo == UseMarkWord    -> use mark word from object header
1653 
1654   bool is_obj_dead_cond(const oop obj,
1655                         const HeapRegion* hr,
1656                         const VerifyOption vo) const {
1657     switch (vo) {
1658     case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
1659     case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
1660     case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
1661     default:                            ShouldNotReachHere();
1662     }
1663     return false; // keep some compilers happy
1664   }
1665 
1666   bool is_obj_dead_cond(const oop obj,
1667                         const VerifyOption vo) const {
1668     switch (vo) {
1669     case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
1670     case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
1671     case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
1672     default:                            ShouldNotReachHere();
1673     }
1674     return false; // keep some compilers happy
1675   }
1676 
1677   bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
1678   HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
1679   bool is_marked(oop obj, VerifyOption vo);
1680   const char* top_at_mark_start_str(VerifyOption vo);
1681 
1682   // The following is just to alert the verification code
1683   // that a full collection has occurred and that the
1684   // remembered sets are no longer up to date.
1685   bool _full_collection;
1686   void set_full_collection() { _full_collection = true;}
1687   void clear_full_collection() {_full_collection = false;}
1688   bool full_collection() {return _full_collection;}
1689 
1690   ConcurrentMark* concurrent_mark() const { return _cm; }
1691   ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1692 
1693   // The dirty cards region list is used to record a subset of regions
1694   // whose cards need clearing. The list is populated during the
1695   // remembered set scanning and drained during the card table
1696   // cleanup. Although the methods are reentrant, population/draining
1697   // phases must not overlap. For synchronization purposes the last
1698   // element on the list points to itself.
1699   HeapRegion* _dirty_cards_region_list;
1700   void push_dirty_cards_region(HeapRegion* hr);
1701   HeapRegion* pop_dirty_cards_region();
1702 
1703 public:
1704   void stop_conc_gc_threads();
1705 
1706   size_t pending_card_num();
1707   size_t cards_scanned();
1708 
1709 protected:
1710   size_t _max_heap_capacity;
1711 };
1712 
1713 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1714 private:
1715   bool        _retired;
1716 
1717 public:
1718   G1ParGCAllocBuffer(size_t gclab_word_size);
1719 
1720   void set_buf(HeapWord* buf) {
1721     ParGCAllocBuffer::set_buf(buf);


   1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  27 
  28 #include "gc_implementation/g1/concurrentMark.hpp"
  29 #include "gc_implementation/g1/g1AllocRegion.hpp"
  30 #include "gc_implementation/g1/g1HRPrinter.hpp"
  31 #include "gc_implementation/g1/g1RemSet.hpp"
  32 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
  33 #include "gc_implementation/g1/heapRegionSeq.hpp"
  34 #include "gc_implementation/g1/heapRegionSets.hpp"
  35 #include "gc_implementation/shared/hSpaceCounters.hpp"
  36 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  37 #include "memory/barrierSet.hpp"
  38 #include "memory/memRegion.hpp"
  39 #include "memory/sharedHeap.hpp"
  40 #include "utilities/stack.hpp"
  41 
  42 // A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
  43 // It uses the "Garbage First" heap organization and algorithm, which
  44 // may combine concurrent marking with parallel, incremental compaction of
  45 // heap subsets that will yield large amounts of garbage.
  46 
  47 // Forward declarations
  48 class HeapRegion;
  49 class HRRSCleanupTask;
  50 class GenerationSpec;
  51 class OopsInHeapRegionClosure;
  52 class G1KlassScanClosure;
  53 class G1ScanHeapEvacClosure;
  54 class ObjectClosure;
  55 class SpaceClosure;
  56 class CompactibleSpaceClosure;
  57 class Space;
  58 class G1CollectorPolicy;
  59 class GenRemSet;
  60 class G1RemSet;
  61 class HeapRegionRemSetIterator;
  62 class ConcurrentMark;
  63 class ConcurrentMarkThread;
  64 class ConcurrentG1Refine;
  65 class GenerationCounters;
  66 class nmethod;
  67 
  68 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
  69 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
  70 
  71 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
  72 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  73 
  74 enum GCAllocPurpose {
  75   GCAllocForTenured,
  76   GCAllocForSurvived,
  77   GCAllocPurposeCount
  78 };
  79 
  80 class YoungList : public CHeapObj<mtGC> {
  81 private:
  82   G1CollectedHeap* _g1h;
  83 
  84   HeapRegion* _head;
  85 
  86   HeapRegion* _survivor_head;


 141 
 142   HeapRegion* first_region() { return _head; }
 143   HeapRegion* first_survivor_region() { return _survivor_head; }
 144   HeapRegion* last_survivor_region() { return _survivor_tail; }
 145 
 146   // debugging
 147   bool          check_list_well_formed();
 148   bool          check_list_empty(bool check_sample = true);
 149   void          print();
 150 };
 151 
 152 class MutatorAllocRegion : public G1AllocRegion {
 153 protected:
 154   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 155   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 156 public:
 157   MutatorAllocRegion()
 158     : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
 159 };
 160 
 161 class SurvivorGCAllocRegion : public G1AllocRegion {
 162 protected:
 163   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 164   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 165 public:
 166   SurvivorGCAllocRegion()
 167   : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
 168 };
 169 
 170 class OldGCAllocRegion : public G1AllocRegion {
 171 protected:
 172   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 173   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 174 public:
 175   OldGCAllocRegion()
 176   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 177 };
 178 
 179 // The G1 STW is-alive closure.
 180 // An instance is embedded into the G1CH and used as the
 181 // (optional) _is_alive_non_header closure in the STW
 182 // reference processor. It is also extensively used during
 183 // reference processing during STW evacuation pauses.
 184 class G1STWIsAliveClosure: public BoolObjectClosure {
 185   G1CollectedHeap* _g1;
 186 public:
 187   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 188   bool do_object_b(oop p);
 189 };
 190 
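
A hedged illustration of how such a closure is consulted: during STW reference processing, the referent of each discovered Reference is tested with do_object_b to decide whether the Reference can be kept as-is. The free function below is only a sketch, not part of this file:

    // Sketch only: a referent is treated as live if it is non-NULL and the
    // STW is-alive closure answers true for it under the current marking.
    inline bool referent_is_alive(BoolObjectClosure* is_alive, oop referent) {
      return referent != NULL && is_alive->do_object_b(referent);
    }
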
 191 class RefineCardTableEntryClosure;
 192 
 193 class G1CollectedHeap : public SharedHeap {
 194   friend class VM_G1CollectForAllocation;
 195   friend class VM_G1CollectFull;
 196   friend class VM_G1IncCollectionPause;
 197   friend class VMStructs;
 198   friend class MutatorAllocRegion;
 199   friend class SurvivorGCAllocRegion;
 200   friend class OldGCAllocRegion;
 201 
 202   // Closures used in implementation.
 203   template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
 204   friend class G1ParCopyClosure;
 205   friend class G1IsAliveClosure;
 206   friend class G1EvacuateFollowersClosure;
 207   friend class G1ParScanThreadState;
 208   friend class G1ParScanClosureSuper;
 209   friend class G1ParEvacuateFollowersClosure;
 210   friend class G1ParTask;


1517   // that this is always a safe operation, since it doesn't clear any
1518   // bits.
1519   void markModUnionRange(MemRegion mr);
1520 
1521   // Records the fact that a marking phase is no longer in progress.
1522   void set_marking_complete() {
1523     _mark_in_progress = false;
1524   }
1525   void set_marking_started() {
1526     _mark_in_progress = true;
1527   }
1528   bool mark_in_progress() {
1529     return _mark_in_progress;
1530   }
1531 
1532   // Return the maximum heap capacity.
1533   virtual size_t max_capacity() const;
1534 
1535   virtual jlong millis_since_last_gc();
1536 
1537   // Convenience function to be used in situations where the heap type can be
1538   // asserted to be this type.
1539   static G1CollectedHeap* heap();
1540 
1541   void set_region_short_lived_locked(HeapRegion* hr);
1542   // add appropriate methods for any other surv rate groups
1543 
1544   YoungList* young_list() { return _young_list; }
1545 
1546   // debugging
1547   bool check_young_list_well_formed() {
1548     return _young_list->check_list_well_formed();
1549   }
1550 
1551   bool check_young_list_empty(bool check_heap,
1552                               bool check_sample = true);
1553 
1554   // *** Stuff related to concurrent marking.  It's not clear to me that so
1555   // many of these need to be public.
1556 


1592   // Additionally, a NULL object is not considered dead.
1593 
1594   bool is_obj_dead(const oop obj) const {
1595     const HeapRegion* hr = heap_region_containing(obj);
1596     if (hr == NULL) {
1597       if (obj == NULL) return false;
1598       else return true;
1599     }
1600     else return is_obj_dead(obj, hr);
1601   }
1602 
1603   bool is_obj_ill(const oop obj) const {
1604     const HeapRegion* hr = heap_region_containing(obj);
1605     if (hr == NULL) {
1606       if (obj == NULL) return false;
1607       else return true;
1608     }
1609     else return is_obj_ill(obj, hr);
1610   }
1611 
1612   bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
1613   HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
1614   bool is_marked(oop obj, VerifyOption vo);
1615   const char* top_at_mark_start_str(VerifyOption vo);
1616 
1617   ConcurrentMark* concurrent_mark() const { return _cm; }
1618 
1619   // Refinement
1620 
1621   ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1622 
1623   // The dirty cards region list is used to record a subset of regions
1624   // whose cards need clearing. The list is populated during the
1625   // remembered set scanning and drained during the card table
1626   // cleanup. Although the methods are reentrant, population/draining
1627   // phases must not overlap. For synchronization purposes the last
1628   // element on the list points to itself.
1629   HeapRegion* _dirty_cards_region_list;
1630   void push_dirty_cards_region(HeapRegion* hr);
1631   HeapRegion* pop_dirty_cards_region();
1632 
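
Because the last element points to itself, a NULL link can double as the "not on the list" marker. A simplified, single-threaded sketch of that discipline (the node type and link field are stand-ins, not the HeapRegion API):

    // Sketch only: a list whose last element points to itself, so that a
    // NULL link unambiguously means "not enqueued".
    struct DirtyCardsNode {
      DirtyCardsNode* _next_dirty;   // NULL <=> not on the list
    };

    void push_dirty(DirtyCardsNode*& head, DirtyCardsNode* n) {
      n->_next_dirty = (head != NULL) ? head : n;  // last element links to itself
      head = n;
    }

    DirtyCardsNode* pop_dirty(DirtyCardsNode*& head) {
      DirtyCardsNode* n = head;
      if (n == NULL) return NULL;
      head = (n->_next_dirty == n) ? NULL : n->_next_dirty;
      n->_next_dirty = NULL;                       // mark as off the list
      return n;
    }
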
1633   // Optimized nmethod scanning support routines
1634 
1635   // Register the given nmethod with the G1 heap
1636   virtual void register_nmethod(nmethod* nm);
1637 
1638   // Unregister the given nmethod from the G1 heap
1639   virtual void unregister_nmethod(nmethod* nm);
1640 
1641   // Migrate the nmethods in the code root lists of the regions
1642   // in the collection set to regions in to-space. In the event
1643   // of an evacuation failure, nmethods that reference objects
1644   // that were not successfully evacuated are not migrated.
1645   void migrate_strong_code_roots();
1646 
1647   // During an initial mark pause, mark all the code roots that
1648   // point into regions *not* in the collection set.
1649   void mark_strong_code_roots(uint worker_id);
1650 
1651   // Rebuild the strong code root lists for each region
1652   // after a full GC.
1653   void rebuild_strong_code_roots();
1654 
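
One plausible shape for register_nmethod, assuming each HeapRegion keeps a strong code root set with an add_strong_code_root(nmethod*) entry point (an assumption inferred from the comments above, not a declaration in this file):

    // Sketch only: record 'nm' with every region that one of its embedded oops
    // points into, so the region can treat the nmethod as a strong code root.
    class RegisterNMethodOopClosure : public OopClosure {
      G1CollectedHeap* _g1h;
      nmethod*         _nm;
    public:
      RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm)
        : _g1h(g1h), _nm(nm) { }
      void do_oop(oop* p) {
        oop obj = *p;
        if (obj != NULL) {
          HeapRegion* hr = _g1h->heap_region_containing(obj);
          hr->add_strong_code_root(_nm);  // assumed per-region code-root set API
        }
      }
      void do_oop(narrowOop* p) { ShouldNotReachHere(); }  // nmethods embed uncompressed oops
    };

register_nmethod(nm) would then be roughly nm->oops_do(&cl) over such a closure, and unregister_nmethod would remove the entries symmetrically.
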
1655   // Verification
1656 
1657   // The following is just to alert the verification code
1658   // that a full collection has occurred and that the
1659   // remembered sets are no longer up to date.
1660   bool _full_collection;
1661   void set_full_collection() { _full_collection = true;}
1662   void clear_full_collection() {_full_collection = false;}
1663   bool full_collection() {return _full_collection;}
1664 
1665   // Perform any cleanup actions necessary before allowing a verification.
1666   virtual void prepare_for_verify();
1667 
1668   // Perform verification.
1669 
1670   // vo == UsePrevMarking  -> use "prev" marking information,
1671   // vo == UseNextMarking -> use "next" marking information
1672   // vo == UseMarkWord    -> use the mark word in the object header
1673   //
1674   // NOTE: Only the "prev" marking information is guaranteed to be
1675   // consistent most of the time, so most calls to this should use
1676   // vo == UsePrevMarking.
1677   // Currently, there is only one case where this is called with
1678   // vo == UseNextMarking, which is to verify the "next" marking
1679   // information at the end of remark.
1680   // Currently there is only one place where this is called with
1681   // vo == UseMarkWord, which is to verify the marking during a
1682   // full GC.
1683   void verify(bool silent, VerifyOption vo);
1684 
1685   // Override; it uses the "prev" marking information
1686   virtual void verify(bool silent);
1687 
1688   // The methods below are here for convenience and dispatch the
1689   // appropriate method depending on the value of the given VerifyOption
1690   // parameter. The values for that parameter, and their meanings,
1691   // are the same as those above.
1692 
1693   bool is_obj_dead_cond(const oop obj,
1694                         const HeapRegion* hr,
1695                         const VerifyOption vo) const {
1696     switch (vo) {
1697     case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
1698     case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
1699     case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
1700     default:                            ShouldNotReachHere();
1701     }
1702     return false; // keep some compilers happy
1703   }
1704 
1705   bool is_obj_dead_cond(const oop obj,
1706                         const VerifyOption vo) const {
1707     switch (vo) {
1708     case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
1709     case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
1710     case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
1711     default:                            ShouldNotReachHere();
1712     }
1713     return false; // keep some compilers happy
1714   }
1715 
1716   // Printing
1717 
1718   virtual void print_on(outputStream* st) const;
1719   virtual void print_extended_on(outputStream* st) const;
1720   virtual void print_on_error(outputStream* st) const;
1721 
1722   virtual void print_gc_threads_on(outputStream* st) const;
1723   virtual void gc_threads_do(ThreadClosure* tc) const;
1724 
1725   // Override
1726   void print_tracing_info() const;
1727 
1728   // The following two methods are helpful for debugging RSet issues.
1729   void print_cset_rsets() PRODUCT_RETURN;
1730   void print_all_rsets() PRODUCT_RETURN;
1731 
1732 public:
1733   void stop_conc_gc_threads();
1734 
1735   size_t pending_card_num();
1736   size_t cards_scanned();
1737 
1738 protected:
1739   size_t _max_heap_capacity;
1740 };
1741 
1742 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1743 private:
1744   bool        _retired;
1745 
1746 public:
1747   G1ParGCAllocBuffer(size_t gclab_word_size);
1748 
1749   void set_buf(HeapWord* buf) {
1750     ParGCAllocBuffer::set_buf(buf);