src/share/vm/memory/cardTableModRefBS.hpp

rev 6805: imported patch commit-uncommit-within-heap


  79   static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
  80     return cv == dirty_card || cv == precleaned_card;
  81   }
  82 
  83   // Returns "true" iff the value "cv" will cause the card containing it
  84   // to be scanned in the current traversal.  May be overridden by
  85   // subtypes.
  86   virtual bool card_will_be_scanned(jbyte cv) {
  87     return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv);
  88   }
  89 
  90   // Returns "true" iff the value "cv" may have represented a dirty card at
  91   // some point.
  92   virtual bool card_may_have_been_dirty(jbyte cv) {
  93     return card_is_dirty_wrt_gen_iter(cv);
  94   }
  95 
  96   // The declaration order of these const fields is important; see the
  97   // constructor before changing.
  98   const MemRegion _whole_heap;       // the region covered by the card table
  99   const size_t    _guard_index;      // index of very last element in the card
 100                                      // table; it is set to a guard value
 101                                      // (last_card) and should never be modified
 102   const size_t    _last_valid_index; // index of the last valid element
 103   const size_t    _page_size;        // page size used when mapping _byte_map
 104   const size_t    _byte_map_size;    // in bytes
 105   jbyte*          _byte_map;         // the card marking array
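
The note above leans on a C++ rule worth spelling out: members are initialized in declaration order, not in mem-initializer-list order, so a const field computed from another member must be declared after it. A minimal standalone sketch (hypothetical names, not HotSpot code):

    #include <cstddef>
    #include <cstdio>

    struct Sketch {
      const size_t _page_size;       // declared first, so initialized first
      const size_t _byte_map_size;   // may therefore safely read _page_size
      explicit Sketch(size_t ps)
        : _page_size(ps),
          _byte_map_size(_page_size * 2) {}   // swapping the two declarations
                                              // above would leave this reading
                                              // an uninitialized member
    };

    int main() {
      Sketch s(4096);
      std::printf("%zu\n", s._byte_map_size);   // prints 8192
    }
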
 106 
 107   int _cur_covered_regions;
 108   // The covered regions should be in address order.
 109   MemRegion* _covered;
 110   // The committed regions correspond one-to-one to the covered regions.
 111   // They represent the card-table memory that has been committed to service
 112   // the corresponding covered region.  It may be that the committed
 113   // region for one covered region is larger because of page-size
 114   // rounding.  Thus, a committed region for one covered region may
 115   // actually extend onto the card-table space for the next covered region.
 116   MemRegion* _committed;
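
To make the rounding concrete (assuming 512-byte cards and a 4 KB commit page, both hypothetical here): a covered region whose cards occupy bytes [0, 6000) of the card table forces [0, 8192) to be committed, and the tail of that second page is card-table space belonging to the next covered region.
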
 117 
 118   // The last card is a guard card, and we commit the page for it so
 119   // we can use the card for verification purposes. We make sure we never
 120   // uncommit the MemRegion for that page.
 121   MemRegion _guard_region;
 122 
 123  protected:
 124   // Initialization utilities; covered_words is the size of the covered region
 125   // in words.
 126   inline size_t cards_required(size_t covered_words);

 127   inline size_t compute_byte_map_size();
 128 
 129   // Finds and returns the index of the region, if any, to which the given
 130   // region would be contiguous.  If none exists, assigns a new region and
 131   // returns its index.  Requires that no more than the maximum number of
 132   // covered regions defined in the constructor are ever in use.
 133   int find_covering_region_by_base(HeapWord* base);
 134 
 135   // Same as above, but finds the region containing the given address
 136   // instead of starting at a given base address.
 137   int find_covering_region_containing(HeapWord* addr);
 138 
 139   // Resize one of the regions covered by the remembered set.
 140   void resize_covered_region(MemRegion new_region);
 141 
 142   // Returns the leftmost end of a committed region corresponding to a
 143   // covered region before covered region "ind", or else "NULL" if "ind" is
 144   // the first covered region.
 145   HeapWord* largest_prev_committed_end(int ind) const;
 146 
 147   // Returns the part of the region mr that doesn't intersect with
 148   // any committed region other than self.  Used to prevent uncommitting
 149   // regions that are also committed by other regions.  Also protects
 150   // against uncommitting the guard region.
 151   MemRegion committed_unique_to_self(int self, MemRegion mr) const;
 152 
 153   // Mapping from address to card marking array entry
 154   jbyte* byte_for(const void* p) const {
 155     assert(_whole_heap.contains(p),
 156            err_msg("Attempt to access p = "PTR_FORMAT" out of bounds of "
 157                    "card marking array's _whole_heap = ["PTR_FORMAT","PTR_FORMAT")",
 158                    p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end())));
 159     jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
 160     assert(result >= _byte_map && result < _byte_map + _byte_map_size,
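
For orientation, a standalone sketch (hypothetical addresses and sizes) of the biased-base trick byte_for relies on: byte_map_base is offset so that indexing it with (address >> card_shift) lands inside _byte_map for any address in the covered heap, with no subtraction of the heap base on the fast path.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int card_shift = 9;                  // 512-byte cards, as above
      static unsigned char byte_map[1024];       // stand-in card array
      const uintptr_t heap_start = 0x100000;     // assumed covered-region base

      // Bias the base so byte_map_base[heap_start >> card_shift] == &byte_map[0]
      // (mirrors byte_map_base in the real class).
      unsigned char* byte_map_base = byte_map - (heap_start >> card_shift);

      uintptr_t p = heap_start + 5000;           // an address in the heap
      unsigned char* card = &byte_map_base[p >> card_shift];
      std::printf("card index = %td\n", card - byte_map);   // prints 9
    }
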


 264   enum SomePublicConstants {
 265     card_shift                  = 9,
 266     card_size                   = 1 << card_shift,
 267     card_size_in_words          = card_size / sizeof(HeapWord)
 268   };
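
Worked out: card_shift = 9 gives card_size = 2^9 = 512 bytes, so with the usual pointer-sized HeapWord that is 512 / 8 = 64 words per card on a 64-bit VM, and 128 on a 32-bit VM.
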
 269 
 270   static int clean_card_val()      { return clean_card; }
 271   static int clean_card_mask_val() { return clean_card_mask; }
 272   static int dirty_card_val()      { return dirty_card; }
 273   static int claimed_card_val()    { return claimed_card; }
 274   static int precleaned_card_val() { return precleaned_card; }
 275   static int deferred_card_val()   { return deferred_card; }
 276 
 277   // For RTTI simulation.
 278   bool is_a(BarrierSet::Name bsn) {
 279     return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn);
 280   }
 281 
 282   CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
 283   ~CardTableModRefBS();


 284 
 285   // *** Barrier set functions.
 286 
 287   bool has_write_ref_pre_barrier() { return false; }
 288 
 289   // Record a reference update. Note that these versions are precise!
 290   // The scanning code has to handle the fact that the write barrier may be
 291   // either precise or imprecise. We make non-virtual inline variants of
 292   // these functions here for performance.
 293 protected:
 294   void write_ref_field_work(oop obj, size_t offset, oop newVal);
 295   virtual void write_ref_field_work(void* field, oop newVal, bool release = false);
 296 public:
 297 
 298   bool has_write_ref_array_opt() { return true; }
 299   bool has_write_region_opt() { return true; }
 300 
 301   inline void inline_write_region(MemRegion mr) {
 302     dirty_MemRegion(mr);
 303   }




  79   static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
  80     return cv == dirty_card || cv == precleaned_card;
  81   }
  82 
  83   // Returns "true" iff the value "cv" will cause the card containing it
  84   // to be scanned in the current traversal.  May be overridden by
  85   // subtypes.
  86   virtual bool card_will_be_scanned(jbyte cv) {
  87     return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv);
  88   }
  89 
  90   // Returns "true" iff the value "cv" may have represented a dirty card at
  91   // some point.
  92   virtual bool card_may_have_been_dirty(jbyte cv) {
  93     return card_is_dirty_wrt_gen_iter(cv);
  94   }
  95 
  96   // The declaration order of these const fields is important; see the
  97   // constructor before changing.
  98   const MemRegion _whole_heap;       // the region covered by the card table
  99   size_t          _guard_index;      // index of very last element in the card
 100                                      // table; it is set to a guard value
 101                                      // (last_card) and should never be modified
 102   size_t          _last_valid_index; // index of the last valid element
 103   const size_t    _page_size;        // page size used when mapping _byte_map
 104   size_t          _byte_map_size;    // in bytes
 105   jbyte*          _byte_map;         // the card marking array
 106 
 107   int _cur_covered_regions;
 108   // The covered regions should be in address order.
 109   MemRegion* _covered;
 110   // The committed regions correspond one-to-one to the covered regions.
 111   // They represent the card-table memory that has been committed to service
 112   // the corresponding covered region.  It may be that the committed
 113   // region for one covered region is larger because of page-size
 114   // rounding.  Thus, a committed region for one covered region may
 115   // actually extend onto the card-table space for the next covered region.
 116   MemRegion* _committed;
 117 
 118   // The last card is a guard card, and we commit the page for it so
 119   // we can use the card for verification purposes. We make sure we never
 120   // uncommit the MemRegion for that page.
 121   MemRegion _guard_region;
 122 
 123  protected:
 124   // Initialization utilities; covered_words is the size of the covered region
 125   // in words.
 126   inline size_t cards_required(size_t covered_words) {
 127     // Add one for a guard card, used to detect errors.
 128     const size_t words = align_size_up(covered_words, card_size_in_words);
 129     return words / card_size_in_words + 1;
 130   }
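
The arithmetic in the new cards_required body, restated standalone with hypothetical 64-bit values: covering 1000 words rounds up to 1024 words, i.e. 16 cards, plus one guard card gives 17.

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t card_size_in_words = 64;   // 512-byte cards / 8-byte words
      size_t covered_words = 1000;            // hypothetical covered size
      // align_size_up equivalent: round up to a whole number of cards.
      size_t words = (covered_words + card_size_in_words - 1)
                     / card_size_in_words * card_size_in_words;   // 1024
      std::printf("%zu\n", words / card_size_in_words + 1);       // prints 17
    }
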
 131 
 132   inline size_t compute_byte_map_size();
 133 
 134   // Finds and returns the index of the region, if any, to which the given
 135   // region would be contiguous.  If none exists, assigns a new region and
 136   // returns its index.  Requires that no more than the maximum number of
 137   // covered regions defined in the constructor are ever in use.
 138   int find_covering_region_by_base(HeapWord* base);
 139 
 140   // Same as above, but finds the region containing the given address
 141   // instead of starting at a given base address.
 142   int find_covering_region_containing(HeapWord* addr);
 143 
 144   // Resize one of the regions covered by the remembered set.
 145   virtual void resize_covered_region(MemRegion new_region);
 146 
 147   // Returns the leftmost end of a committed region corresponding to a
 148   // covered region before covered region "ind", or else "NULL" if "ind" is
 149   // the first covered region.
 150   HeapWord* largest_prev_committed_end(int ind) const;
 151 
 152   // Returns the part of the region mr that doesn't intersect with
 153   // any committed region other than self.  Used to prevent uncommitting
 154   // regions that are also committed by other regions.  Also protects
 155   // against uncommitting the guard region.
 156   MemRegion committed_unique_to_self(int self, MemRegion mr) const;
 157 
 158   // Mapping from address to card marking array entry
 159   jbyte* byte_for(const void* p) const {
 160     assert(_whole_heap.contains(p),
 161            err_msg("Attempt to access p = "PTR_FORMAT" out of bounds of "
 162                    "card marking array's _whole_heap = ["PTR_FORMAT","PTR_FORMAT")",
 163                    p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end())));
 164     jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
 165     assert(result >= _byte_map && result < _byte_map + _byte_map_size,


 269   enum SomePublicConstants {
 270     card_shift                  = 9,
 271     card_size                   = 1 << card_shift,
 272     card_size_in_words          = card_size / sizeof(HeapWord)
 273   };
 274 
 275   static int clean_card_val()      { return clean_card; }
 276   static int clean_card_mask_val() { return clean_card_mask; }
 277   static int dirty_card_val()      { return dirty_card; }
 278   static int claimed_card_val()    { return claimed_card; }
 279   static int precleaned_card_val() { return precleaned_card; }
 280   static int deferred_card_val()   { return deferred_card; }
 281 
 282   // For RTTI simulation.
 283   bool is_a(BarrierSet::Name bsn) {
 284     return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn);
 285   }
 286 
 287   CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
 288   ~CardTableModRefBS();
 289 
 290   virtual void initialize();
 291 
 292   // *** Barrier set functions.
 293 
 294   bool has_write_ref_pre_barrier() { return false; }
 295 
 296   // Record a reference update. Note that these versions are precise!
 297   // The scanning code has to handle the fact that the write barrier may be
 298   // either precise or imprecise. We make non-virtual inline variants of
 299   // these functions here for performance.
 300 protected:
 301   void write_ref_field_work(oop obj, size_t offset, oop newVal);
 302   virtual void write_ref_field_work(void* field, oop newVal, bool release = false);
 303 public:
 304 
 305   bool has_write_ref_array_opt() { return true; }
 306   bool has_write_region_opt() { return true; }
 307 
 308   inline void inline_write_region(MemRegion mr) {
 309     dirty_MemRegion(mr);
 310   }
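
To make the barrier's effect concrete, a minimal sketch (hypothetical layout, not the real dirty_MemRegion) of what dirtying a region amounts to: every card entry the written region spans is set to dirty_card (0 in HotSpot's encoding; clean is -1) so the next scan revisits those words.

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    int main() {
      const int card_shift = 9;
      const signed char clean_card = -1, dirty_card = 0;   // HotSpot's values
      static signed char byte_map[64];
      std::memset(byte_map, clean_card, sizeof(byte_map));

      // Pretend the covered region starts at card 0 and bytes [600, 1500)
      // were just written: dirty every card the region touches.
      size_t start = 600, end = 1500;
      for (size_t c = start >> card_shift; c <= (end - 1) >> card_shift; c++)
        byte_map[c] = dirty_card;

      std::printf("%d %d %d %d\n",
                  byte_map[0], byte_map[1], byte_map[2], byte_map[3]);
      // prints -1 0 0 -1 : cards 1 and 2 are dirty
    }
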