
src/share/vm/gc/g1/g1SATBCardTableModRefBS.hpp


Old version:

  39 
  40 class G1SATBCardTableModRefBS: public CardTableModRefBS {
  41   friend class VMStructs;
  42 protected:
  43   enum G1CardValues {
  44     g1_young_gen = CT_MR_BS_last_reserved << 1
  45   };
  46 
  47   G1SATBCardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
  48   ~G1SATBCardTableModRefBS() { }
  49 
  50 public:
  51   static int g1_young_card_val()   { return g1_young_gen; }
  52 
  53   // Add "pre_val" to a set of objects that may have been disconnected from the
  54   // pre-marking object graph.
  55   static void enqueue(oop pre_val);
  56 
  57   virtual bool has_write_ref_pre_barrier() { return true; }
  58 
  59   // This barrier does not need to access any BarrierSet data
  60   // structures, so it can be called from a static context.
  61   template <class T> static void write_ref_field_pre_static(T* field, oop newVal) {
  62     T heap_oop = oopDesc::load_heap_oop(field);
  63     if (!oopDesc::is_null(heap_oop)) {
  64       enqueue(oopDesc::decode_heap_oop(heap_oop));
  65     }
  66   }
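       // A minimal usage sketch, assuming a caller that knows the barrier
       // type statically; `field_addr` and `new_val` are hypothetical names,
       // and oopDesc::store_heap_oop is the matching store primitive:
       //
       //   oop* field_addr = ...;  // reference field about to be overwritten
       //   oop  new_val    = ...;  // value being stored
       //   // Enqueue the old value first, so concurrent marking still sees
       //   // the snapshot-at-the-beginning object graph, then do the store.
       //   G1SATBCardTableModRefBS::write_ref_field_pre_static(field_addr, new_val);
       //   oopDesc::store_heap_oop(field_addr, new_val);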
  67 
  68   // We export this to make it available in cases where the static
  69   // type of the barrier set is known.  Note that it is non-virtual.
  70   template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
  71     write_ref_field_pre_static(field, newVal);
  72   }
  73 
  74   // These are the more general virtual versions.
  75   virtual void write_ref_field_pre_work(oop* field, oop new_val) {
  76     inline_write_ref_field_pre(field, new_val);
  77   }
  78   virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {
  79     inline_write_ref_field_pre(field, new_val);
  80   }
  81   virtual void write_ref_field_pre_work(void* field, oop new_val) {
  82     guarantee(false, "Not needed");
  83   }
  84 
  85   template <class T> void write_ref_array_pre_work(T* dst, int count);
  86   virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
  87   virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
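       // A sketch of the intended bulk pre-barrier, on the assumption that
       // the out-of-line implementation applies the field pre-barrier element
       // by element (illustrative only; `newVal` is ignored by the barrier):
       //
       //   for (int i = 0; i < count; i++) {
       //     write_ref_field_pre_static(&dst[i], NULL);
       //   }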
  88 
  89 /*
  90    Claimed and deferred bits are used together in G1 during the evacuation
  91    pause. These bits can have the following state transitions:
  92    1. The claimed bit can be put over any other card state. Except that
  93       the "dirty -> dirty and claimed" transition is checked for in


 155  protected:
 156   virtual void write_ref_field_work(void* field, oop new_val, bool release);
 157 
 158  public:
 159   static size_t compute_size(size_t mem_region_size_in_words) {
 160     size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
 161     return ReservedSpace::allocation_align_size_up(number_of_slots);
 162   }
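       // Worked example (a sketch assuming a 64-bit VM: 8-byte HeapWords and
       // the default 512-byte cards, i.e. card_size_in_words == 64): a 1 GB
       // heap is 2^27 words, giving 2^27 / 64 = 2^21 one-byte slots, so the
       // card table reservation is about 2 MB, rounded up to the allocation
       // alignment by ReservedSpace::allocation_align_size_up().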
 163 
 164   // Returns how many bytes of the heap a single byte of the Card Table corresponds to.
 165   static size_t heap_map_factor() {
 166     return CardTableModRefBS::card_size;
 167   }
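       // For example, with the default card_size of 512: one card table byte
       // maps 512 bytes of heap, so the table is 1/512th of the heap it covers.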
 168 
 169   G1SATBCardTableLoggingModRefBS(MemRegion whole_heap);
 170 
 171   virtual void initialize() { }
 172   virtual void initialize(G1RegionToSpaceMapper* mapper);
 173 
 174   virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
 175 
 176   // Can be called from static contexts.
 177   static void write_ref_field_static(void* field, oop new_val);
 178 
 179   // NB: if you do a whole-heap invalidation, the "usual invariant" defined
 180   // above no longer applies.
 181   void invalidate(MemRegion mr, bool whole_heap = false);
 182 
 183   void write_region_work(MemRegion mr)    { invalidate(mr); }
 184   void write_ref_array_work(MemRegion mr) { invalidate(mr); }
 185 };
 186 
 187 template<>
 188 struct BarrierSet::GetName<G1SATBCardTableLoggingModRefBS> {
 189   static const BarrierSet::Name value = BarrierSet::G1SATBCTLogging;
 190 };
 191 
 192 #endif // SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_HPP

New version:

  39 
  40 class G1SATBCardTableModRefBS: public CardTableModRefBS {
  41   friend class VMStructs;
  42 protected:
  43   enum G1CardValues {
  44     g1_young_gen = CT_MR_BS_last_reserved << 1
  45   };
  46 
  47   G1SATBCardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
  48   ~G1SATBCardTableModRefBS() { }
  49 
  50 public:
  51   static int g1_young_card_val()   { return g1_young_gen; }
  52 
  53   // Add "pre_val" to a set of objects that may have been disconnected from the
  54   // pre-marking object graph.
  55   static void enqueue(oop pre_val);
  56 
  57   virtual bool has_write_ref_pre_barrier() { return true; }
  58 
  59   // We export this to make it available in cases where the static
  60   // type of the barrier set is known.  Note that it is non-virtual.
  61   template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
  62     T heap_oop = oopDesc::load_heap_oop(field);
  63     if (!oopDesc::is_null(heap_oop)) {
  64       enqueue(oopDesc::decode_heap_oop(heap_oop));
  65     }
  66   }
  67 






  68   // These are the more general virtual versions.
  69   virtual void write_ref_field_pre_work(oop* field, oop new_val) {
  70     inline_write_ref_field_pre(field, new_val);
  71   }
  72   virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {
  73     inline_write_ref_field_pre(field, new_val);
  74   }
  75   virtual void write_ref_field_pre_work(void* field, oop new_val) {
  76     guarantee(false, "Not needed");
  77   }
  78 
  79   template <class T> void write_ref_array_pre_work(T* dst, int count);
  80   virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
  81   virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
  82 
  83 /*
  84    Claimed and deferred bits are used together in G1 during the evacuation
  85    pause. These bits can have the following state transitions:
  86    1. The claimed bit can be put over any other card state. Except that
  87       the "dirty -> dirty and claimed" transition is checked for in


 149  protected:
 150   virtual void write_ref_field_work(void* field, oop new_val, bool release);
 151 
 152  public:
 153   static size_t compute_size(size_t mem_region_size_in_words) {
 154     size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
 155     return ReservedSpace::allocation_align_size_up(number_of_slots);
 156   }
 157 
 158   // Returns how many bytes of the heap a single byte of the Card Table corresponds to.
 159   static size_t heap_map_factor() {
 160     return CardTableModRefBS::card_size;
 161   }
 162 
 163   G1SATBCardTableLoggingModRefBS(MemRegion whole_heap);
 164 
 165   virtual void initialize() { }
 166   virtual void initialize(G1RegionToSpaceMapper* mapper);
 167 
 168   virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }



 169 
 170   // NB: if you do a whole-heap invalidation, the "usual invariant" defined
 171   // above no longer applies.
 172   void invalidate(MemRegion mr, bool whole_heap = false);
 173 
 174   void write_region_work(MemRegion mr)    { invalidate(mr); }
 175   void write_ref_array_work(MemRegion mr) { invalidate(mr); }
 176 };
 177 
 178 template<>
 179 struct BarrierSet::GetName<G1SATBCardTableLoggingModRefBS> {
 180   static const BarrierSet::Name value = BarrierSet::G1SATBCTLogging;
 181 };
 182 
 183 #endif // SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_HPP
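
The GetName specialization at the end of the header is what lets generic code
perform a checked downcast from a plain BarrierSet* to the concrete G1 type. A
minimal sketch of that pattern, assuming the barrier_set_cast helper and
BarrierSet::is_a() as they exist in barrierSet.hpp of this vintage (treat the
exact spellings as assumptions rather than a definitive API):

// Sketch: a GetName-driven checked downcast (assumed helper shape).
template <typename T>
T* barrier_set_cast(BarrierSet* bs) {
  assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong barrier set type");
  return static_cast<T*>(bs);
}

// Usage: recover the G1 logging barrier set from the generic heap pointer.
// G1SATBCardTableLoggingModRefBS* g1_bs =
//   barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());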