src/hotspot/share/gc/g1/heapRegion.inline.hpp

rev 52719 : [mq]: 8159440-marking-of-promoted-objects-to-concurrent


 235 
 236 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
 237                                                      size_t desired_word_size,
 238                                                      size_t* actual_word_size) {
 239   assert(is_young(), "we can only skip BOT updates on young regions");
 240   return allocate_impl(min_word_size, desired_word_size, actual_word_size);
 241 }
 242 
 243 inline void HeapRegion::note_start_of_marking() {
 244   _next_marked_bytes = 0;
 245   _next_top_at_mark_start = top();
 246 }
 247 
 248 inline void HeapRegion::note_end_of_marking() {
 249   _prev_top_at_mark_start = _next_top_at_mark_start;
 250   _next_top_at_mark_start = bottom();
 251   _prev_marked_bytes = _next_marked_bytes;
 252   _next_marked_bytes = 0;
 253 }
 254 
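The two note_*_of_marking hooks above maintain G1's per-region "top at mark start" (TAMS) snapshot: note_start_of_marking() records where top() was when a marking cycle began, and note_end_of_marking() promotes that snapshot (and the marked-bytes counter) to the "prev" slots. Below is a minimal standalone sketch of the rule the snapshot enforces; MarkInfo, start_marking, implicitly_live_wrt_next_marking and HeapWordAddr are hypothetical names for illustration, not the real HeapRegion API.

#include <cstddef>

typedef char* HeapWordAddr;             // illustrative stand-in for HotSpot's HeapWord*

struct MarkInfo {
  HeapWordAddr top;                     // current allocation top of the region
  HeapWordAddr next_tams;               // plays the role of _next_top_at_mark_start
  std::size_t  next_marked_bytes;       // plays the role of _next_marked_bytes
};

// Mirrors note_start_of_marking(): reset the per-cycle counter and snapshot top.
inline void start_marking(MarkInfo& r) {
  r.next_marked_bytes = 0;
  r.next_tams = r.top;
}

// The rule the snapshot exists for: an address at or above NTAMS was allocated
// after marking started and is treated as live without consulting the mark
// bitmap; below NTAMS the bitmap decides.
inline bool implicitly_live_wrt_next_marking(const MarkInfo& r, HeapWordAddr addr) {
  return addr >= r.next_tams;
}
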
 255 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
 256   if (is_survivor()) {
 257     // This is how we always allocate survivors.
 258     assert(_next_top_at_mark_start == bottom(), "invariant");
 259   } else {
 260     if (during_initial_mark) {
 261       // During initial-mark we'll explicitly mark any objects on old
 262       // regions that are pointed to by roots. Given that explicit
 263       // marks only make sense under NTAMS it'd be nice if we could
 264       // check that condition if we wanted to. Given that we don't
 265       // know where the top of this region will end up, we simply set
 266       // NTAMS to the end of the region so all marks will be below
 267       // NTAMS. We'll set it to the actual top when we retire this region.
 268       _next_top_at_mark_start = end();
 269     } else {
 270       // We could have re-used this old region as to-space over a
 271       // couple of GCs since the start of the concurrent marking
 272       // cycle. This means that [bottom,NTAMS) will contain objects
 273       // copied up to and including initial-mark and [NTAMS, top)
 274       // will contain objects copied during the concurrent marking cycle.
 275       assert(top() >= _next_top_at_mark_start, "invariant");
 276     }
 277   }
 278 }
 279 
 280 inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
 281   if (is_survivor()) {
 282     // This is how we always allocate survivors.
 283     assert(_next_top_at_mark_start == bottom(), "invariant");
 284   } else {
 285     if (during_initial_mark) {
 286       // See the comment for note_start_of_copying() for the details
 287       // on this.
 288       assert(_next_top_at_mark_start == end(), "pre-condition");
 289       _next_top_at_mark_start = top();
 290     } else {
 291       // See the comment for note_start_of_copying() for the details
 292       // on this.
 293       assert(top() >= _next_top_at_mark_start, "invariant");
 294     }
 295   }
 296 }
 297 
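These two copying hooks, with their NTAMS adjustment for old regions reused as to-space, are what the 8159440 change drops: they are absent from the second listing further down. The following standalone sketch shows the adjustment they perform around the initial-mark pause, using the hypothetical names CopyTargetRegion, start_copying_during_initial_mark and end_copying_during_initial_mark rather than the real API.

typedef char* HeapWordAddr;   // illustrative stand-in for HotSpot's HeapWord*

struct CopyTargetRegion {
  HeapWordAddr top;        // current allocation top (still moving while copying)
  HeapWordAddr end;        // fixed end of the region
  HeapWordAddr next_tams;  // plays the role of _next_top_at_mark_start
};

// Mirrors note_start_of_copying(true) for a non-survivor region: top is not
// known yet, so NTAMS is pushed to end() and every explicit initial-mark mark
// is guaranteed to land below it.
inline void start_copying_during_initial_mark(CopyTargetRegion& r) {
  r.next_tams = r.end;
}

// Mirrors note_end_of_copying(true): the region is retired and top is final,
// so NTAMS is tightened back down to the real top.
inline void end_copying_during_initial_mark(CopyTargetRegion& r) {
  r.next_tams = r.top;
}
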
 298 inline bool HeapRegion::in_collection_set() const {
 299   return G1CollectedHeap::heap()->is_in_cset(this);
 300 }
 301 
 302 template <class Closure, bool is_gc_active>
 303 bool HeapRegion::do_oops_on_card_in_humongous(MemRegion mr,
 304                                               Closure* cl,
 305                                               G1CollectedHeap* g1h) {
 306   assert(is_humongous(), "precondition");
 307   HeapRegion* sr = humongous_start_region();
 308   oop obj = oop(sr->bottom());
 309 
 310   // If concurrent and klass_or_null is NULL, then space has been
 311   // allocated but the object has not yet been published by setting
 312   // the klass.  That can only happen if the card is stale.  However,
 313   // we've already set the card clean, so we must return failure,
 314   // since the allocating thread could have performed a write to the
 315   // card that might be missed otherwise.
 316   if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) {
 317     return false;




 235 
 236 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
 237                                                      size_t desired_word_size,
 238                                                      size_t* actual_word_size) {
 239   assert(is_young(), "we can only skip BOT updates on young regions");
 240   return allocate_impl(min_word_size, desired_word_size, actual_word_size);
 241 }
 242 
 243 inline void HeapRegion::note_start_of_marking() {
 244   _next_marked_bytes = 0;
 245   _next_top_at_mark_start = top();
 246 }
 247 
 248 inline void HeapRegion::note_end_of_marking() {
 249   _prev_top_at_mark_start = _next_top_at_mark_start;
 250   _next_top_at_mark_start = bottom();
 251   _prev_marked_bytes = _next_marked_bytes;
 252   _next_marked_bytes = 0;
 253 }
 254 
 255 inline bool HeapRegion::in_collection_set() const {
 256   return G1CollectedHeap::heap()->is_in_cset(this);
 257 }
 258 
 259 template <class Closure, bool is_gc_active>
 260 bool HeapRegion::do_oops_on_card_in_humongous(MemRegion mr,
 261                                               Closure* cl,
 262                                               G1CollectedHeap* g1h) {
 263   assert(is_humongous(), "precondition");
 264   HeapRegion* sr = humongous_start_region();
 265   oop obj = oop(sr->bottom());
 266 
 267   // If concurrent and klass_or_null is NULL, then space has been
 268   // allocated but the object has not yet been published by setting
 269   // the klass.  That can only happen if the card is stale.  However,
 270   // we've already set the card clean, so we must return failure,
 271   // since the allocating thread could have performed a write to the
 272   // card that might be missed otherwise.
 273   if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) {
 274     return false;
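
The comment in do_oops_on_card_in_humongous() describes a publication race: a refinement thread running concurrently with the mutator may see an object whose space has been allocated but whose klass has not yet been stored. Below is a standalone sketch of that release/acquire publication pattern, with hypothetical names (FakeObject, publish, try_scan) instead of HotSpot's oop and klass machinery. During a safepoint no allocation can race with the scan, which is why the real code only performs the NULL check when !is_gc_active.

#include <atomic>
#include <cstddef>

struct FakeObject {
  std::atomic<void*> klass{nullptr};  // stands in for the object's klass word
  std::size_t        payload{0};
};

// Allocating thread: fill in the object, then publish it by storing the klass
// last with release semantics.
inline void publish(FakeObject& obj, void* klass, std::size_t payload) {
  obj.payload = payload;
  obj.klass.store(klass, std::memory_order_release);
}

// Concurrent refinement thread: mirrors the klass_or_null_acquire() == NULL
// check. A NULL klass means the object is not yet published, so the card must
// be reported as unprocessed (return false), as in the listing above.
inline bool try_scan(const FakeObject& obj) {
  if (obj.klass.load(std::memory_order_acquire) == nullptr) {
    return false;  // stale card hitting an unpublished object; bail out
  }
  // Safe to examine obj.payload here.
  return true;
}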

