src/share/vm/gc/g1/g1CollectedHeap.hpp

rev 12906 : [mq]: gc_interface

@@ -26,21 +26,22 @@
 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
 
 #include "gc/g1/evacuationInfo.hpp"
 #include "gc/g1/g1AllocationContext.hpp"
 #include "gc/g1/g1BiasedArray.hpp"
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1EdenRegions.hpp"
 #include "gc/g1/g1EvacFailure.hpp"
 #include "gc/g1/g1EvacStats.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1HRPrinter.hpp"
 #include "gc/g1/g1InCSetState.hpp"
 #include "gc/g1/g1MonitoringSupport.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1SurvivorRegions.hpp"
 #include "gc/g1/g1YCTypes.hpp"
 #include "gc/g1/hSpaceCounters.hpp"
 #include "gc/g1/heapRegionManager.hpp"
 #include "gc/g1/heapRegionSet.hpp"

@@ -957,10 +958,12 @@
   // maximum sizes and remembered and barrier sets
   // specified by the policy object.
   jint initialize();
 
   virtual void stop();
+  virtual void safepoint_synchronize_begin();
+  virtual void safepoint_synchronize_end();
 
   // Return the (conservative) maximum heap alignment for any G1 heap
   static size_t conservative_max_heap_alignment();
 
   // Does operations required after initialization has been done.

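The two safepoint hooks added here give the collector a callback at the start and end of safepoint synchronization so it can quiesce its concurrent worker threads. The header only declares them; as a hedged sketch (not taken from this webrev), a plausible G1 implementation routes them through the shared suspendible thread set:

  // Sketch only: pause and resume suspendible concurrent GC threads
  // (refinement, marking) around the safepoint.
  void G1CollectedHeap::safepoint_synchronize_begin() {
    SuspendibleThreadSet::synchronize();
  }

  void G1CollectedHeap::safepoint_synchronize_end() {
    SuspendibleThreadSet::desynchronize();
  }
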
@@ -1159,12 +1162,16 @@
     return _hrm.reserved();
   }
 
   virtual bool is_in_closed_subset(const void* p) const;
 
-  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
-    return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());
+  G1BarrierSet* g1_barrier_set() {
+    return barrier_set_cast<G1BarrierSet>(barrier_set());
+  }
+
+  G1CardTable* g1_card_table() {
+    return static_cast<G1CardTable*>(g1_barrier_set()->card_table());
   }
 
   // Iteration functions.
 
   // Iterate over all objects, calling "cl.do_object" on each.

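With the barrier set split from the card table, each piece is reached through its own accessor. A minimal usage sketch, relying only on the declarations above and the existing G1CollectedHeap::heap() accessor:

  // Sketch: the barrier set keeps the SATB / dirty-card queue machinery,
  // while card state is queried through the separate G1CardTable object.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1BarrierSet*    bs  = g1h->g1_barrier_set();
  G1CardTable*     ct  = g1h->g1_card_table();
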
@@ -1250,42 +1257,14 @@
   size_t tlab_capacity(Thread* ignored) const;
   size_t tlab_used(Thread* ignored) const;
   size_t max_tlab_size() const;
   size_t unsafe_max_tlab_alloc(Thread* ignored) const;
 
-  // Can a compiler initialize a new object without store barriers?
-  // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint. If such permission
-  // is granted for this heap type, the compiler promises to call
-  // defer_store_barrier() below on any slow path allocation of
-  // a new object for which such initializing store barriers will
-  // have been elided. G1, like CMS, allows this, but should be
-  // ready to provide a compensating write barrier as necessary
-  // if that storage came out of a non-young region. The efficiency
-  // of this implementation depends crucially on being able to
-  // answer very efficiently in constant time whether a piece of
-  // storage in the heap comes from a young region or not.
-  // See ReduceInitialCardMarks.
-  virtual bool can_elide_tlab_store_barriers() const {
-    return true;
-  }
-
-  virtual bool card_mark_must_follow_store() const {
-    return true;
-  }
-
   inline bool is_in_young(const oop obj);
 
   virtual bool is_scavengable(const void* addr);
 
-  // We don't need barriers for initializing stores to objects
-  // in the young gen: for the SATB pre-barrier, there is no
-  // pre-value that needs to be remembered; for the remembered-set
-  // update logging post-barrier, we don't maintain remembered set
-  // information for young gen objects.
-  virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
-
   // Returns "true" iff the given word_size is "very large".
   static bool is_humongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs
     // are capped at the humongous threshold and we want to
     // ensure that we don't try to allocate a TLAB as

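The comment on is_humongous() above explains why the comparison must be strictly greater-than; the body itself falls outside this hunk. As a hedged sketch of the usual G1 definition (assuming the conventional threshold of half a heap region in words):

  // Sketch, not part of this webrev: an allocation is humongous when it is
  // strictly larger than half a region, so a maximally sized TLAB never
  // qualifies as a humongous object.
  static bool is_humongous(size_t word_size) {
    return word_size > HeapRegion::GrainWords / 2;
  }
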
@@ -1441,10 +1420,12 @@
   // WhiteBox testing support.
   virtual bool supports_concurrent_phase_control() const;
   virtual const char* const* concurrent_phases() const;
   virtual bool request_concurrent_phase(const char* phase);
 
+  void verify_nmethod_roots(nmethod* nmethod);
+
   // The methods below are here for convenience and dispatch the
   // appropriate method depending on value of the given VerifyOption
   // parameter. The values for that parameter, and their meanings,
   // are the same as those above.
 
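verify_nmethod_roots() is new in this change and is only declared here. A hedged sketch of one plausible shape, where VerifyNMethodRootsClosure is a hypothetical name and not part of this webrev:

  // Sketch only: walk the oops embedded in the compiled method and verify
  // that each one is a valid reference into the heap.
  void G1CollectedHeap::verify_nmethod_roots(nmethod* nm) {
    VerifyNMethodRootsClosure cl(this);  // hypothetical verification closure
    nm->oops_do(&cl);
  }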