src/share/vm/gc/g1/g1CollectedHeap.hpp

rev 12906 : [mq]: gc_interface

--- old/src/share/vm/gc/g1/g1CollectedHeap.hpp ---

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/evacuationInfo.hpp"
  29 #include "gc/g1/g1AllocationContext.hpp"
  30 #include "gc/g1/g1BiasedArray.hpp"

  31 #include "gc/g1/g1CollectionSet.hpp"
  32 #include "gc/g1/g1CollectorState.hpp"
  33 #include "gc/g1/g1ConcurrentMark.hpp"
  34 #include "gc/g1/g1EdenRegions.hpp"
  35 #include "gc/g1/g1EvacFailure.hpp"
  36 #include "gc/g1/g1EvacStats.hpp"
  37 #include "gc/g1/g1HeapVerifier.hpp"
  38 #include "gc/g1/g1HRPrinter.hpp"
  39 #include "gc/g1/g1InCSetState.hpp"
  40 #include "gc/g1/g1MonitoringSupport.hpp"
  41 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  42 #include "gc/g1/g1SurvivorRegions.hpp"
  43 #include "gc/g1/g1YCTypes.hpp"
  44 #include "gc/g1/hSpaceCounters.hpp"
  45 #include "gc/g1/heapRegionManager.hpp"
  46 #include "gc/g1/heapRegionSet.hpp"
  47 #include "gc/shared/barrierSet.hpp"
  48 #include "gc/shared/collectedHeap.hpp"
  49 #include "gc/shared/plab.hpp"
  50 #include "gc/shared/preservedMarks.hpp"
  51 #include "memory/memRegion.hpp"
  52 #include "utilities/stack.hpp"
  53 
  54 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  55 // It uses the "Garbage First" heap organization and algorithm, which
  56 // may combine concurrent marking with parallel, incremental compaction of
  57 // heap subsets that will yield large amounts of garbage.
  58 
  59 // Forward declarations
  60 class HeapRegion;
  61 class HRRSCleanupTask;


 942   void set_refine_cte_cl_concurrency(bool concurrent);
 943 
 944   RefToScanQueue *task_queue(uint i) const;
 945 
 946   uint num_task_queues() const;
 947 
 948   // A set of cards where updates happened during the GC
 949   DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
 950 
 951   // Create a G1CollectedHeap with the specified policy.
 952   // Must call the initialize method afterwards.
 953   // May not return if something goes wrong.
 954   G1CollectedHeap(G1CollectorPolicy* policy);
 955 
 956   // Initialize the G1CollectedHeap to have the initial and
 957   // maximum sizes and remembered and barrier sets
 958   // specified by the policy object.
 959   jint initialize();
 960 
 961   virtual void stop();


 962 
 963   // Return the (conservative) maximum heap alignment for any G1 heap
 964   static size_t conservative_max_heap_alignment();
 965 
 966   // Does operations required after initialization has been done.
 967   void post_initialize();
 968 
 969   // Initialize weak reference processing.
 970   void ref_processing_init();
 971 
 972   virtual Name kind() const {
 973     return CollectedHeap::G1CollectedHeap;
 974   }
 975 
 976   virtual const char* name() const {
 977     return "G1";
 978   }
 979 
 980   const G1CollectorState* collector_state() const { return &_collector_state; }
 981   G1CollectorState* collector_state() { return &_collector_state; }


1144   G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
1145 
1146  public:
1147 
1148   inline InCSetState in_cset_state(const oop obj);
1149 
1150   // Return "TRUE" iff the given object address is in the reserved
1151   // region of g1.
1152   bool is_in_g1_reserved(const void* p) const {
1153     return _hrm.reserved().contains(p);
1154   }
1155 
1156   // Returns a MemRegion that corresponds to the space that has been
1157   // reserved for the heap
1158   MemRegion g1_reserved() const {
1159     return _hrm.reserved();
1160   }
1161 
1162   virtual bool is_in_closed_subset(const void* p) const;
1163 
1164   G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
1165     return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());




1166   }
1167 
1168   // Iteration functions.
1169 
1170   // Iterate over all objects, calling "cl.do_object" on each.
1171   virtual void object_iterate(ObjectClosure* cl);
1172 
1173   virtual void safe_object_iterate(ObjectClosure* cl) {
1174     object_iterate(cl);
1175   }
1176 
1177   // Iterate over heap regions, in address order, terminating the
1178   // iteration early if the "doHeapRegion" method returns "true".
1179   void heap_region_iterate(HeapRegionClosure* blk) const;
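    As a hedged illustration of this early-termination contract (a sketch, not code
    from this webrev), a closure that counts regions up to the first humongous one
    could look as follows; HeapRegionClosure, doHeapRegion and the region_at/
    heap_region_iterate entry points come from the surrounding code, while the
    closure itself and HeapRegion::is_humongous() are assumptions for the example:

        // Sketch only: count regions until the first humongous region is met.
        class CountUntilHumongousClosure : public HeapRegionClosure {
          uint _count;
         public:
          CountUntilHumongousClosure() : _count(0) { }
          virtual bool doHeapRegion(HeapRegion* hr) {
            if (hr->is_humongous()) {
              return true;    // returning true terminates the iteration early
            }
            _count++;
            return false;     // returning false continues with the next region
          }
          uint count() const { return _count; }
        };

        // Usage: CountUntilHumongousClosure cl;
        //        G1CollectedHeap::heap()->heap_region_iterate(&cl);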
1180 
1181   // Return the region with the given index. It assumes the index is valid.
1182   inline HeapRegion* region_at(uint index) const;
1183 
1184   // Return the next region (by index) that is part of the same
1185   // humongous object that hr is part of.


1235   virtual HeapWord* block_start(const void* addr) const;
1236 
1237   // Requires "addr" to be the start of a chunk, and returns its size.
1238   // "addr + size" is required to be the start of a new chunk, or the end
1239   // of the active area of the heap.
1240   virtual size_t block_size(const HeapWord* addr) const;
1241 
1242   // Requires "addr" to be the start of a block, and returns "TRUE" iff
1243   // the block is an object.
1244   virtual bool block_is_obj(const HeapWord* addr) const;
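    Taken together, block_start/block_size/block_is_obj support a linear walk over a
    parseable range. A minimal sketch under the contract stated above (bottom and top
    are assumed to delimit such a range, e.g. a region's used space; the walk_blocks
    helper is invented for the example):

        // Sketch only: visit every chunk of [bottom, top).
        void walk_blocks(const G1CollectedHeap* g1h, HeapWord* bottom, HeapWord* top) {
          HeapWord* cur = bottom;
          while (cur < top) {
            size_t size = g1h->block_size(cur);  // cur is the start of a chunk
            if (g1h->block_is_obj(cur)) {
              // cur is the start of an object of "size" words
            }
            cur += size;                         // per the contract, this is the next
                                                 // chunk start or the end of the range
          }
        }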
1245 
1246   // Section on thread-local allocation buffers (TLABs)
1247   // See CollectedHeap for semantics.
1248 
1249   bool supports_tlab_allocation() const;
1250   size_t tlab_capacity(Thread* ignored) const;
1251   size_t tlab_used(Thread* ignored) const;
1252   size_t max_tlab_size() const;
1253   size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1254 
1255   // Can a compiler initialize a new object without store barriers?
1256   // This permission only extends from the creation of a new object
1257   // via a TLAB up to the first subsequent safepoint. If such permission
1258   // is granted for this heap type, the compiler promises to call
1259   // defer_store_barrier() below on any slow path allocation of
1260   // a new object for which such initializing store barriers will
1261   // have been elided. G1, like CMS, allows this, but should be
1262   // ready to provide a compensating write barrier as necessary
1263   // if that storage came out of a non-young region. The efficiency
1264   // of this implementation depends crucially on being able to
1265   // answer very efficiently in constant time whether a piece of
1266   // storage in the heap comes from a young region or not.
1267   // See ReduceInitialCardMarks.
1268   virtual bool can_elide_tlab_store_barriers() const {
1269     return true;
1270   }
1271 
1272   virtual bool card_mark_must_follow_store() const {
1273     return true;
1274   }
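    A hedged sketch of the compensating step this comment describes: on the slow
    path, only storage outside the young gen needs a deferred card mark, which is why
    the constant-time young test matters. The defer_store_barrier() name is taken
    from the comment above; remember_deferred_card_mark() is a hypothetical helper,
    not an API of this file:

        // Sketch only, assuming the contract described in the comment above.
        oop defer_store_barrier(JavaThread* thread, oop new_obj) {
          if (is_in_young(new_obj)) {
            // Young regions keep no remembered-set info, so the elided
            // initializing card marks never need to be replayed.
            return NULL;
          }
          // Non-young storage: record the object so a compensating card
          // mark can be issued later (hypothetical helper).
          remember_deferred_card_mark(thread, new_obj);
          return new_obj;
        }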
1275 
1276   inline bool is_in_young(const oop obj);
1277 
1278   virtual bool is_scavengable(const void* addr);
1279 
1280   // We don't need barriers for initializing stores to objects
1281   // in the young gen: for the SATB pre-barrier, there is no
1282   // pre-value that needs to be remembered; for the remembered-set
1283   // update logging post-barrier, we don't maintain remembered set
1284   // information for young gen objects.
1285   virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
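    Given that reasoning, the implementation can reduce to the young-region test
    itself; a plausible one-liner (a sketch only, the real body lives in the
    corresponding .inline.hpp file, which this webrev excerpt does not show):

        inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
          // No SATB pre-value and no remembered-set logging for young objects.
          return is_in_young(new_obj);
        }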
1286 
1287   // Returns "true" iff the given word_size is "very large".
1288   static bool is_humongous(size_t word_size) {
1289     // Note this has to be strictly greater-than as the TLABs
1290     // are capped at the humongous threshold and we want to
1291     // ensure that we don't try to allocate a TLAB as
1292     // humongous and that we don't allocate a humongous
1293     // object in a TLAB.
1294     return word_size > _humongous_object_threshold_in_words;
1295   }
1296 
1297   // Returns the humongous threshold for a specific region size
1298   static size_t humongous_threshold_for(size_t region_size) {
1299     return (region_size / 2);
1300   }
1301 
1302   // Returns the number of regions the humongous object of the given word size
1303   // requires.
1304   static size_t humongous_obj_size_in_regions(size_t word_size);
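    To make the threshold arithmetic concrete (illustrative numbers, not taken from
    this file): with 1 MB regions and 8-byte heap words, a region holds 131072 words,
    so the humongous threshold is 65536 words; 65537 words is then the smallest
    humongous size, and it still fits in a single region:

        // Illustrative only: 1 MB regions, 8-byte HeapWords.
        size_t region_words = (1*M) / HeapWordSize;                    // 131072 words
        size_t threshold =
            G1CollectedHeap::humongous_threshold_for(region_words);    // 65536 words
        // is_humongous() is strictly greater-than, so a 65536-word request can
        // still be satisfied by a TLAB, while 65537 words is humongous and
        // humongous_obj_size_in_regions(65537) would be 1.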
1305 
1306   // Print the maximum heap capacity.


1425 
1426   // vo == UsePrevMarking  -> use "prev" marking information,
1427   // vo == UseNextMarking -> use "next" marking information
1428   // vo == UseMarkWord    -> use the mark word in the object header
1429   //
1430   // NOTE: Only the "prev" marking information is guaranteed to be
1431   // consistent most of the time, so most calls to this should use
1432   // vo == UsePrevMarking.
1433   // Currently, there is only one case where this is called with
1434   // vo == UseNextMarking, which is to verify the "next" marking
1435   // information at the end of remark.
1436   // Currently there is only one place where this is called with
1437   // vo == UseMarkWord, which is to verify the marking during a
1438   // full GC.
1439   void verify(VerifyOption vo);
1440 
1441   // WhiteBox testing support.
1442   virtual bool supports_concurrent_phase_control() const;
1443   virtual const char* const* concurrent_phases() const;
1444   virtual bool request_concurrent_phase(const char* phase);


1445 
1446   // The methods below are here for convenience and dispatch the
1447   // appropriate method depending on value of the given VerifyOption
1448   // parameter. The values for that parameter, and their meanings,
1449   // are the same as those above.
1450 
1451   bool is_obj_dead_cond(const oop obj,
1452                         const HeapRegion* hr,
1453                         const VerifyOption vo) const;
1454 
1455   bool is_obj_dead_cond(const oop obj,
1456                         const VerifyOption vo) const;
1457 
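    A hedged sketch of the dispatch these convenience methods perform; the
    is_obj_dead/is_obj_ill helper names follow G1 naming conventions elsewhere but
    are assumptions here, not guaranteed by this excerpt:

        // Sketch only: dispatch on the VerifyOption values described above.
        bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                               const HeapRegion* hr,
                                               const VerifyOption vo) const {
          switch (vo) {
            case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr); // "prev"
            case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);  // "next"
            case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked(); // full GC
            default:                            ShouldNotReachHere();
          }
          return false; // unreachable
        }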
1458   G1HeapSummary create_g1_heap_summary();
1459   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1460 
1461   // Printing
1462 private:
1463   void print_heap_regions() const;
1464   void print_regions_on(outputStream* st) const;




  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/evacuationInfo.hpp"
  29 #include "gc/g1/g1AllocationContext.hpp"
  30 #include "gc/g1/g1BiasedArray.hpp"
  31 #include "gc/g1/g1CardTable.hpp"
  32 #include "gc/g1/g1CollectionSet.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ConcurrentMark.hpp"
  35 #include "gc/g1/g1EdenRegions.hpp"
  36 #include "gc/g1/g1EvacFailure.hpp"
  37 #include "gc/g1/g1EvacStats.hpp"
  38 #include "gc/g1/g1HeapVerifier.hpp"
  39 #include "gc/g1/g1HRPrinter.hpp"
  40 #include "gc/g1/g1InCSetState.hpp"
  41 #include "gc/g1/g1MonitoringSupport.hpp"
  42 #include "gc/g1/g1BarrierSet.hpp"
  43 #include "gc/g1/g1SurvivorRegions.hpp"
  44 #include "gc/g1/g1YCTypes.hpp"
  45 #include "gc/g1/hSpaceCounters.hpp"
  46 #include "gc/g1/heapRegionManager.hpp"
  47 #include "gc/g1/heapRegionSet.hpp"
  48 #include "gc/shared/barrierSet.hpp"
  49 #include "gc/shared/collectedHeap.hpp"
  50 #include "gc/shared/plab.hpp"
  51 #include "gc/shared/preservedMarks.hpp"
  52 #include "memory/memRegion.hpp"
  53 #include "utilities/stack.hpp"
  54 
  55 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  56 // It uses the "Garbage First" heap organization and algorithm, which
  57 // may combine concurrent marking with parallel, incremental compaction of
  58 // heap subsets that will yield large amounts of garbage.
  59 
  60 // Forward declarations
  61 class HeapRegion;
  62 class HRRSCleanupTask;


 943   void set_refine_cte_cl_concurrency(bool concurrent);
 944 
 945   RefToScanQueue *task_queue(uint i) const;
 946 
 947   uint num_task_queues() const;
 948 
 949   // A set of cards where updates happened during the GC
 950   DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
 951 
 952   // Create a G1CollectedHeap with the specified policy.
 953   // Must call the initialize method afterwards.
 954   // May not return if something goes wrong.
 955   G1CollectedHeap(G1CollectorPolicy* policy);
 956 
 957   // Initialize the G1CollectedHeap to have the initial and
 958   // maximum sizes and remembered and barrier sets
 959   // specified by the policy object.
 960   jint initialize();
 961 
 962   virtual void stop();
 963   virtual void safepoint_synchronize_begin();
 964   virtual void safepoint_synchronize_end();
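    These two hooks are new in this patch; the excerpt shows only their
    declarations. A hedged guess at the bodies, assuming they delegate to the
    SuspendibleThreadSet mechanism G1 already uses to pause its concurrent workers
    (an assumption, not confirmed by this webrev):

        // Sketch only: quiesce suspendible concurrent GC threads across a safepoint.
        void G1CollectedHeap::safepoint_synchronize_begin() {
          SuspendibleThreadSet::synchronize();
        }

        void G1CollectedHeap::safepoint_synchronize_end() {
          SuspendibleThreadSet::desynchronize();
        }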
 965 
 966   // Return the (conservative) maximum heap alignment for any G1 heap
 967   static size_t conservative_max_heap_alignment();
 968 
 969   // Does operations required after initialization has been done.
 970   void post_initialize();
 971 
 972   // Initialize weak reference processing.
 973   void ref_processing_init();
 974 
 975   virtual Name kind() const {
 976     return CollectedHeap::G1CollectedHeap;
 977   }
 978 
 979   virtual const char* name() const {
 980     return "G1";
 981   }
 982 
 983   const G1CollectorState* collector_state() const { return &_collector_state; }
 984   G1CollectorState* collector_state() { return &_collector_state; }


1147   G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
1148 
1149  public:
1150 
1151   inline InCSetState in_cset_state(const oop obj);
1152 
1153   // Return "TRUE" iff the given object address is in the reserved
1154   // region of g1.
1155   bool is_in_g1_reserved(const void* p) const {
1156     return _hrm.reserved().contains(p);
1157   }
1158 
1159   // Returns a MemRegion that corresponds to the space that has been
1160   // reserved for the heap
1161   MemRegion g1_reserved() const {
1162     return _hrm.reserved();
1163   }
1164 
1165   virtual bool is_in_closed_subset(const void* p) const;
1166 
1167   G1BarrierSet* g1_barrier_set() {
1168     return barrier_set_cast<G1BarrierSet>(barrier_set());
1169   }
1170 
1171   G1CardTable* g1_card_table() {
1172     return static_cast<G1CardTable*>(g1_barrier_set()->card_table());
1173   }
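    After this change the barrier set and the card table are separate objects
    reached through separate accessors, rather than the barrier set doubling as the
    card table. A minimal usage sketch (G1CollectedHeap::heap() is the usual
    accessor for the heap instance):

        G1CollectedHeap* g1h = G1CollectedHeap::heap();
        G1BarrierSet*    bs  = g1h->g1_barrier_set();
        G1CardTable*     ct  = g1h->g1_card_table(); // same as downcasting
                                                     // bs->card_table()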
1174 
1175   // Iteration functions.
1176 
1177   // Iterate over all objects, calling "cl.do_object" on each.
1178   virtual void object_iterate(ObjectClosure* cl);
1179 
1180   virtual void safe_object_iterate(ObjectClosure* cl) {
1181     object_iterate(cl);
1182   }
1183 
1184   // Iterate over heap regions, in address order, terminating the
1185   // iteration early if the "doHeapRegion" method returns "true".
1186   void heap_region_iterate(HeapRegionClosure* blk) const;
1187 
1188   // Return the region with the given index. It assumes the index is valid.
1189   inline HeapRegion* region_at(uint index) const;
1190 
1191   // Return the next region (by index) that is part of the same
1192   // humongous object that hr is part of.


1242   virtual HeapWord* block_start(const void* addr) const;
1243 
1244   // Requires "addr" to be the start of a chunk, and returns its size.
1245   // "addr + size" is required to be the start of a new chunk, or the end
1246   // of the active area of the heap.
1247   virtual size_t block_size(const HeapWord* addr) const;
1248 
1249   // Requires "addr" to be the start of a block, and returns "TRUE" iff
1250   // the block is an object.
1251   virtual bool block_is_obj(const HeapWord* addr) const;
1252 
1253   // Section on thread-local allocation buffers (TLABs)
1254   // See CollectedHeap for semantics.
1255 
1256   bool supports_tlab_allocation() const;
1257   size_t tlab_capacity(Thread* ignored) const;
1258   size_t tlab_used(Thread* ignored) const;
1259   size_t max_tlab_size() const;
1260   size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1261 





















1262   inline bool is_in_young(const oop obj);
1263 
1264   virtual bool is_scavengable(const void* addr);
1265 







1266   // Returns "true" iff the given word_size is "very large".
1267   static bool is_humongous(size_t word_size) {
1268     // Note this has to be strictly greater-than as the TLABs
1269     // are capped at the humongous threshold and we want to
1270     // ensure that we don't try to allocate a TLAB as
1271     // humongous and that we don't allocate a humongous
1272     // object in a TLAB.
1273     return word_size > _humongous_object_threshold_in_words;
1274   }
1275 
1276   // Returns the humongous threshold for a specific region size
1277   static size_t humongous_threshold_for(size_t region_size) {
1278     return (region_size / 2);
1279   }
1280 
1281   // Returns the number of regions the humongous object of the given word size
1282   // requires.
1283   static size_t humongous_obj_size_in_regions(size_t word_size);
1284 
1285   // Print the maximum heap capacity.


1404 
1405   // vo == UsePrevMarking  -> use "prev" marking information,
1406   // vo == UseNextMarking -> use "next" marking information
1407   // vo == UseMarkWord    -> use the mark word in the object header
1408   //
1409   // NOTE: Only the "prev" marking information is guaranteed to be
1410   // consistent most of the time, so most calls to this should use
1411   // vo == UsePrevMarking.
1412   // Currently, there is only one case where this is called with
1413   // vo == UseNextMarking, which is to verify the "next" marking
1414   // information at the end of remark.
1415   // Currently there is only one place where this is called with
1416   // vo == UseMarkWord, which is to verify the marking during a
1417   // full GC.
1418   void verify(VerifyOption vo);
1419 
1420   // WhiteBox testing support.
1421   virtual bool supports_concurrent_phase_control() const;
1422   virtual const char* const* concurrent_phases() const;
1423   virtual bool request_concurrent_phase(const char* phase);
1424 
1425   void verify_nmethod_roots(nmethod* nmethod);
1426 
1427   // The methods below are here for convenience and dispatch the
1428   // appropriate method depending on value of the given VerifyOption
1429   // parameter. The values for that parameter, and their meanings,
1430   // are the same as those above.
1431 
1432   bool is_obj_dead_cond(const oop obj,
1433                         const HeapRegion* hr,
1434                         const VerifyOption vo) const;
1435 
1436   bool is_obj_dead_cond(const oop obj,
1437                         const VerifyOption vo) const;
1438 
1439   G1HeapSummary create_g1_heap_summary();
1440   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1441 
1442   // Printing
1443 private:
1444   void print_heap_regions() const;
1445   void print_regions_on(outputStream* st) const;

