
src/share/vm/gc/g1/g1BlockOffsetTable.hpp

rev 8978 : imported patch remove_err_msg


  63 
  64   // The space this table is covering.
  65   HeapWord* _bottom;    // == reserved.start
  66   HeapWord* _end;       // End of currently allocated region.
  67 
  68 public:
  69   // Initialize the table to cover the given space.
  70   // The contents of the initial table are undefined.
  71   G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
  72     _bottom(bottom), _end(end)
  73     {
  74       assert(_bottom <= _end, "arguments out of order");
  75     }
  76 
  77   // Note that the committed size of the covered space may have changed,
  78   // so the table size might also wish to change.
  79   virtual void resize(size_t new_word_size) = 0;
  80 
  81   virtual void set_bottom(HeapWord* new_bottom) {
  82     assert(new_bottom <= _end,
  83            err_msg("new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
  84                    p2i(new_bottom), p2i(_end)));
  85     _bottom = new_bottom;
  86     resize(pointer_delta(_end, _bottom));
  87   }
  88 
  89   // Requires "addr" to be contained by a block, and returns the address of
  90   // the start of that block.  (May have side effects, namely updating of
  91   // shared array entries that "point" too far backwards.  This can occur,
  92   // for example, when LAB allocation is used in a space covered by the
  93   // table.)
  94   virtual HeapWord* block_start_unsafe(const void* addr) = 0;
  95   // Same as above, but does not have any of the possible side effects
  96   // discussed above.
  97   virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;
  98 
  99   // Returns the address of the start of the block containing "addr", or
 100   // else "null" if it is covered by no block.  (May have side effects,
 101   // namely updating of shared array entries that "point" too far
 102   // backwards.  This can occur, for example, when lab allocation is used
 103   // in a space covered by the table.)
 104   inline HeapWord* block_start(const void* addr);
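
The change under review is visible in set_bottom() above: the patch removes the err_msg() wrapper and passes the printf-style format string and its arguments to assert() directly. A minimal before/after of the pattern applied at each assert site, taken from the set_bottom() assert in this file:

// Before: format string and arguments wrapped in err_msg().
assert(new_bottom <= _end,
       err_msg("new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
               p2i(new_bottom), p2i(_end)));

// After: assert() takes the format string and arguments itself.
assert(new_bottom <= _end,
       "new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
       p2i(new_bottom), p2i(_end));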


 132 
 133 class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
 134   friend class G1BlockOffsetArray;
 135   friend class G1BlockOffsetArrayContigSpace;
 136   friend class VMStructs;
 137 
 138 private:
 139   G1BlockOffsetSharedArrayMappingChangedListener _listener;
 140   // The reserved region covered by the shared array.
 141   MemRegion _reserved;
 142 
 143   // End of the current committed region.
 144   HeapWord* _end;
 145 
 146   // Array for keeping offsets for retrieving object start fast given an
 147   // address.
 148   u_char* _offset_array;          // byte array keeping backwards offsets
 149 
 150   void check_offset(size_t offset, const char* msg) const {
 151     assert(offset <= N_words,
 152            err_msg("%s - "
 153                    "offset: " SIZE_FORMAT ", N_words: %u",
 154                    msg, offset, (uint)N_words));
 155   }
 156 
 157   // Bounds checking accessors:
 158   // For performance these have to devolve to array accesses in product builds.
 159   inline u_char offset_array(size_t index) const;
 160 
 161   void set_offset_array_raw(size_t index, u_char offset) {
 162     _offset_array[index] = offset;
 163   }
 164 
 165   inline void set_offset_array(size_t index, u_char offset);
 166 
 167   inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);
 168 
 169   inline void set_offset_array(size_t left, size_t right, u_char offset);
 170 
 171   bool is_card_boundary(HeapWord* p) const;
 172 
 173   void check_index(size_t index, const char* msg) const NOT_DEBUG_RETURN;
 174 




  63 
  64   // The space this table is covering.
  65   HeapWord* _bottom;    // == reserved.start
  66   HeapWord* _end;       // End of currently allocated region.
  67 
  68 public:
  69   // Initialize the table to cover the given space.
  70   // The contents of the initial table are undefined.
  71   G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
  72     _bottom(bottom), _end(end)
  73     {
  74       assert(_bottom <= _end, "arguments out of order");
  75     }
  76 
  77   // Note that the committed size of the covered space may have changed,
  78   // so the table size might also wish to change.
  79   virtual void resize(size_t new_word_size) = 0;
  80 
  81   virtual void set_bottom(HeapWord* new_bottom) {
  82     assert(new_bottom <= _end,
  83            "new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
  84            p2i(new_bottom), p2i(_end));
  85     _bottom = new_bottom;
  86     resize(pointer_delta(_end, _bottom));
  87   }
  88 
  89   // Requires "addr" to be contained by a block, and returns the address of
  90   // the start of that block.  (May have side effects, namely updating of
  91   // shared array entries that "point" too far backwards.  This can occur,
  92   // for example, when LAB allocation is used in a space covered by the
  93   // table.)
  94   virtual HeapWord* block_start_unsafe(const void* addr) = 0;
  95   // Same as above, but does not have any of the possible side effects
  96   // discussed above.
  97   virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;
  98 
  99   // Returns the address of the start of the block containing "addr", or
 100   // else "null" if it is covered by no block.  (May have side effects,
 101   // namely updating of shared array entries that "point" too far
 102   // backwards.  This can occur, for example, when lab allocation is used
 103   // in a space covered by the table.)
 104   inline HeapWord* block_start(const void* addr);


 132 
 133 class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
 134   friend class G1BlockOffsetArray;
 135   friend class G1BlockOffsetArrayContigSpace;
 136   friend class VMStructs;
 137 
 138 private:
 139   G1BlockOffsetSharedArrayMappingChangedListener _listener;
 140   // The reserved region covered by the shared array.
 141   MemRegion _reserved;
 142 
 143   // End of the current committed region.
 144   HeapWord* _end;
 145 
 146   // Array for keeping offsets for retrieving object start fast given an
 147   // address.
 148   u_char* _offset_array;          // byte array keeping backwards offsets
 149 
 150   void check_offset(size_t offset, const char* msg) const {
 151     assert(offset <= N_words,
 152            "%s - "
 153            "offset: " SIZE_FORMAT ", N_words: %u",
 154            msg, offset, (uint)N_words);
 155   }
 156 
 157   // Bounds checking accessors:
 158   // For performance these have to devolve to array accesses in product builds.
 159   inline u_char offset_array(size_t index) const;
 160 
 161   void set_offset_array_raw(size_t index, u_char offset) {
 162     _offset_array[index] = offset;
 163   }
 164 
 165   inline void set_offset_array(size_t index, u_char offset);
 166 
 167   inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);
 168 
 169   inline void set_offset_array(size_t left, size_t right, u_char offset);
 170 
 171   bool is_card_boundary(HeapWord* p) const;
 172 
 173   void check_index(size_t index, const char* msg) const NOT_DEBUG_RETURN;
 174 

