210 }
211
212 // Return the next threshold, the point at which the table should be
213 // updated.
214 HeapWord* threshold() const { return _next_offset_threshold; }
215
216 // These must be guaranteed to work properly (i.e., do nothing)
217 // when "blk_start" ("blk" for second version) is "NULL". In this
218 // implementation, that's true because NULL is represented as 0, and thus
219 // never exceeds the "_next_offset_threshold".
220 void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
221 if (blk_end > _next_offset_threshold) {
222 alloc_block_work(&_next_offset_threshold, &_next_offset_index, blk_start, blk_end);
223 }
224 }
225 void alloc_block(HeapWord* blk, size_t size) {
226 alloc_block(blk, blk+size);
227 }
228
// Prepare the table for a "starts humongous" region: obj_top is the top
// of the humongous object, fill_size the size of any trailing filler.
// NOTE(review): semantics inferred from G1 humongous-region naming —
// confirm against the definition in the corresponding .cpp file.
229 void set_for_starts_humongous(HeapWord* obj_top, size_t fill_size);
// Verification knob; PRODUCT_RETURN means this expands to an empty
// inline body in product builds and is only defined in non-product ones.
230 void set_object_can_span(bool can_span) PRODUCT_RETURN;
231
// Debug printing helper; a no-op in product builds (PRODUCT_RETURN).
232 void print_on(outputStream* out) PRODUCT_RETURN;
233 };
234
235 #endif // SHARE_VM_GC_G1_G1BLOCKOFFSETTABLE_HPP
|
210 }
211
212 // Return the next threshold, the point at which the table should be
213 // updated.
214 HeapWord* threshold() const { return _next_offset_threshold; }
215
216 // These must be guaranteed to work properly (i.e., do nothing)
217 // when "blk_start" ("blk" for second version) is "NULL". In this
218 // implementation, that's true because NULL is represented as 0, and thus
219 // never exceeds the "_next_offset_threshold".
220 void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
221 if (blk_end > _next_offset_threshold) {
222 alloc_block_work(&_next_offset_threshold, &_next_offset_index, blk_start, blk_end);
223 }
224 }
225 void alloc_block(HeapWord* blk, size_t size) {
226 alloc_block(blk, blk+size);
227 }
228
// Prepare the table for a "starts humongous" region: obj_top is the top
// of the humongous object, fill_size the size of any trailing filler.
// NOTE(review): semantics inferred from G1 humongous-region naming —
// confirm against the definition in the corresponding .cpp file.
229 void set_for_starts_humongous(HeapWord* obj_top, size_t fill_size);
// Verification knob; NOT_DEBUG_RETURN means this expands to an empty
// inline body except in debug builds, where it is defined out of line.
230 void set_object_can_span(bool can_span) NOT_DEBUG_RETURN;
231
// Debug printing helper; a no-op in product builds (PRODUCT_RETURN).
232 void print_on(outputStream* out) PRODUCT_RETURN;
233 };
234
235 #endif // SHARE_VM_GC_G1_G1BLOCKOFFSETTABLE_HPP
|