< prev index next >

src/share/vm/gc/g1/g1BlockOffsetTable.hpp

Print this page
rev 12504 : 8173764: Assert in G1 BOT is wrong
Reviewed-by:


  99   // Return the appropriate index into "_offset_array" for "p".
 100   inline size_t index_for(const void* p) const;
 101   inline size_t index_for_raw(const void* p) const;
 102 
 103   // Return the address indicating the start of the region corresponding to
 104   // "index" in "_offset_array".
 105   inline HeapWord* address_for_index(size_t index) const;
 106   // Variant of address_for_index that does not check the index for validity.
 107   inline HeapWord* address_for_index_raw(size_t index) const {
 108     return _reserved.start() + (index << BOTConstants::LogN_words); // base of reserved space + index * 2^LogN_words HeapWords (one table slot per card-sized region)
 109   }
 110 };
 111 
 112 class G1BlockOffsetTablePart VALUE_OBJ_CLASS_SPEC {
 113   friend class G1BlockOffsetTable;
 114   friend class VMStructs;
 115 private:
 116   // allocation boundary at which offset array must be updated
 117   HeapWord* _next_offset_threshold;
 118   size_t    _next_offset_index;      // index corresponding to that boundary

 119 
 120   // This is the global BlockOffsetTable.
 121   G1BlockOffsetTable* _bot;
 122 
 123   // The space that owns this subregion.
 124   G1ContiguousSpace* _space;
 125 
 126   // Sets the entries
 127   // corresponding to the cards starting at "start" and ending at "end"
 128   // to point back to the card before "start": the interval [start, end)
 129   // is right-open.
 130   void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
 131   // Same as above, except that the args here are a card _index_ interval
 132   // that is closed: [start_index, end_index]
 133   void set_remainder_to_point_to_start_incl(size_t start, size_t end);
 134 
 135   // Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
 136   // memory first.
 137   void zero_bottom_entry_raw();
 138   // Variant of initialize_threshold that does not check for availability of the


 207   }
 208 
 209   // Return the next threshold, the point at which the table should be
 210   // updated.
 211   HeapWord* threshold() const { return _next_offset_threshold; } // accessor: first address whose allocation requires a table update
 212 
 213   // These must be guaranteed to work properly (i.e., do nothing)
 214   // when "blk_start" ("blk" for second version) is "NULL".  In this
 215   // implementation, that's true because NULL is represented as 0, and thus
 216   // never exceeds the "_next_offset_threshold".
 217   void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
 218     if (blk_end > _next_offset_threshold) { // fast path: blocks ending at or below the threshold need no BOT entries
 219       alloc_block_work(&_next_offset_threshold, &_next_offset_index, blk_start, blk_end);
 220     }
 221   }
 222   void alloc_block(HeapWord* blk, size_t size) {
 223     alloc_block(blk, blk+size); // convenience overload: forwards as the half-open range [blk, blk+size)
 224   }
 225 
 226   void set_for_starts_humongous(HeapWord* obj_top, size_t fill_size);

 227 
 228   void print_on(outputStream* out) PRODUCT_RETURN;
 229 };
 230 
 231 #endif // SHARE_VM_GC_G1_G1BLOCKOFFSETTABLE_HPP


  99   // Return the appropriate index into "_offset_array" for "p".
 100   inline size_t index_for(const void* p) const;
 101   inline size_t index_for_raw(const void* p) const;
 102 
 103   // Return the address indicating the start of the region corresponding to
 104   // "index" in "_offset_array".
 105   inline HeapWord* address_for_index(size_t index) const;
 106   // Variant of address_for_index that does not check the index for validity.
 107   inline HeapWord* address_for_index_raw(size_t index) const {
 108     return _reserved.start() + (index << BOTConstants::LogN_words); // base of reserved space + index * 2^LogN_words HeapWords (one table slot per card-sized region)
 109   }
 110 };
 111 
 112 class G1BlockOffsetTablePart VALUE_OBJ_CLASS_SPEC {
 113   friend class G1BlockOffsetTable;
 114   friend class VMStructs;
 115 private:
 116   // allocation boundary at which offset array must be updated
 117   HeapWord* _next_offset_threshold;
 118   size_t    _next_offset_index;      // index corresponding to that boundary
 119   bool      _object_can_span;        // Set for continues humongous
 120 
 121   // This is the global BlockOffsetTable.
 122   G1BlockOffsetTable* _bot;
 123 
 124   // The space that owns this subregion.
 125   G1ContiguousSpace* _space;
 126 
 127   // Sets the entries
 128   // corresponding to the cards starting at "start" and ending at "end"
 129   // to point back to the card before "start": the interval [start, end)
 130   // is right-open.
 131   void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
 132   // Same as above, except that the args here are a card _index_ interval
 133   // that is closed: [start_index, end_index]
 134   void set_remainder_to_point_to_start_incl(size_t start, size_t end);
 135 
 136   // Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
 137   // memory first.
 138   void zero_bottom_entry_raw();
 139   // Variant of initialize_threshold that does not check for availability of the


 208   }
 209 
 210   // Return the next threshold, the point at which the table should be
 211   // updated.
 212   HeapWord* threshold() const { return _next_offset_threshold; } // accessor: first address whose allocation requires a table update
 213 
 214   // These must be guaranteed to work properly (i.e., do nothing)
 215   // when "blk_start" ("blk" for second version) is "NULL".  In this
 216   // implementation, that's true because NULL is represented as 0, and thus
 217   // never exceeds the "_next_offset_threshold".
 218   void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
 219     if (blk_end > _next_offset_threshold) { // fast path: blocks ending at or below the threshold need no BOT entries
 220       alloc_block_work(&_next_offset_threshold, &_next_offset_index, blk_start, blk_end);
 221     }
 222   }
 223   void alloc_block(HeapWord* blk, size_t size) {
 224     alloc_block(blk, blk+size); // convenience overload: forwards as the half-open range [blk, blk+size)
 225   }
 226 
 227   void set_for_starts_humongous(HeapWord* obj_top, size_t fill_size);
 228   void set_continues_humongous(bool is_humongous);
 229 
 230   void print_on(outputStream* out) PRODUCT_RETURN;
 231 };
 232 
 233 #endif // SHARE_VM_GC_G1_G1BLOCKOFFSETTABLE_HPP
< prev index next >