src/share/vm/gc_implementation/g1/heapRegion.cpp

rev 6589 : [mq]: 8047821.g1savemarks.more_cleanups

--- old/src/share/vm/gc_implementation/g1/heapRegion.cpp

 376   record_top_and_timestamp();
 377 
 378   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 379 }
 380 
 381 CompactibleSpace* HeapRegion::next_compaction_space() const {
 382   // We're not using an iterator given that it will wrap around when
 383   // it reaches the last region and this is not what we want here.
 384   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 385   uint index = hrs_index() + 1;
 386   while (index < g1h->n_regions()) {
 387     HeapRegion* hr = g1h->region_at(index);
 388     if (!hr->isHumongous()) {
 389       return hr;
 390     }
 391     index += 1;
 392   }
 393   return NULL;
 394 }
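
A note on next_compaction_space() above: it deliberately performs a plain forward scan rather than using the heap's region iterator, because the iterator wraps around at the last region, while compaction needs the scan to stop at the end of the heap. A minimal self-contained sketch of the same pattern, using hypothetical Region/Heap stand-ins for HeapRegion and G1CollectedHeap (not the real types):

  #include <cstddef>
  #include <vector>

  struct Region {
    bool humongous;            // stands in for HeapRegion::isHumongous()
  };

  struct Heap {
    std::vector<Region> regions;
    size_t n_regions() const { return regions.size(); }
    Region* region_at(size_t i) { return &regions[i]; }
  };

  // Same shape as next_compaction_space(): walk forward from the region
  // after 'index', skip humongous regions, and return nullptr at the end
  // of the heap -- no wrap-around.
  Region* next_compaction_space(Heap& heap, size_t index) {
    for (size_t i = index + 1; i < heap.n_regions(); ++i) {
      Region* r = heap.region_at(i);
      if (!r->humongous) {
        return r;
      }
    }
    return nullptr;
  }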
 395 
 396 void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
 397   HeapWord* p = mr.start();
 398   HeapWord* e = mr.end();
 399   oop obj;
 400   while (p < e) {
 401     obj = oop(p);
 402     p += obj->oop_iterate(cl);
 403   }
 404   assert(p == e, "bad memregion: doesn't end on obj boundary");
 405 }
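
oops_in_mr_iterate() relies on oop_iterate() returning each object's size in HeapWords, so the cursor hops from one object header to the next, and the closing assert verifies that the MemRegion ends exactly on an object boundary. A rough sketch of that walk, with a hypothetical object type whose iterate() returns its size in words (illustrative only, not the oopDesc API):

  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  typedef uintptr_t Word;      // stands in for HeapWord

  // Hypothetical object layout: the first field records the object's
  // size in words, as a parsable heap requires.
  struct Obj {
    size_t size_in_words;
    size_t iterate(/* closure elided */) const {
      // ... visit the object's reference fields here ...
      return size_in_words;    // tells the caller how far to advance
    }
  };

  // Same shape as oops_in_mr_iterate(): bump the cursor by each
  // object's size until the end of the range is reached.
  void iterate_range(Word* p, Word* e) {
    while (p < e) {
      p += reinterpret_cast<const Obj*>(p)->iterate();
    }
    assert(p == e && "range must end on an object boundary");
  }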
 406 
 407 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 408                                                     bool during_conc_mark) {
 409   // We always recreate the prev marking info and we'll explicitly
 410   // mark all objects we find to be self-forwarded on the prev
 411   // bitmap. So all objects need to be below PTAMS.
 412   _prev_top_at_mark_start = top();
 413   _prev_marked_bytes = 0;
 414 
 415   if (during_initial_mark) {
 416     // During initial-mark, we'll also explicitly mark all objects
 417     // we find to be self-forwarded on the next bitmap. So all
 418     // objects need to be below NTAMS.
 419     _next_top_at_mark_start = top();
 420     _next_marked_bytes = 0;
 421   } else if (during_conc_mark) {
 422     // During concurrent mark, all objects in the CSet (including
 423     // the ones we find to be self-forwarded) are implicitly live.
 424     // So all objects need to be above NTAMS.
 425     _next_top_at_mark_start = bottom();
 426     _next_marked_bytes = 0;

+++ new/src/share/vm/gc_implementation/g1/heapRegion.cpp

 376   record_top_and_timestamp();
 377 
 378   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 379 }
 380 
 381 CompactibleSpace* HeapRegion::next_compaction_space() const {
 382   // We're not using an iterator given that it will wrap around when
 383   // it reaches the last region and this is not what we want here.
 384   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 385   uint index = hrs_index() + 1;
 386   while (index < g1h->n_regions()) {
 387     HeapRegion* hr = g1h->region_at(index);
 388     if (!hr->isHumongous()) {
 389       return hr;
 390     }
 391     index += 1;
 392   }
 393   return NULL;
 394 }
 395 
 396 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 397                                                     bool during_conc_mark) {
 398   // We always recreate the prev marking info and we'll explicitly
 399   // mark all objects we find to be self-forwarded on the prev
 400   // bitmap. So all objects need to be below PTAMS.
 401   _prev_top_at_mark_start = top();
 402   _prev_marked_bytes = 0;
 403 
 404   if (during_initial_mark) {
 405     // During initial-mark, we'll also explicitly mark all objects
 406     // we find to be self-forwarded on the next bitmap. So all
 407     // objects need to be below NTAMS.
 408     _next_top_at_mark_start = top();
 409     _next_marked_bytes = 0;
 410   } else if (during_conc_mark) {
 411     // During concurrent mark, all objects in the CSet (including
 412     // the ones we find to be self-forwarded) are implicitly live.
 413     // So all objects need to be above NTAMS.
 414     _next_top_at_mark_start = bottom();
 415     _next_marked_bytes = 0;
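
The TAMS updates in note_self_forwarding_removal_start() follow G1's per-bitmap liveness rule: an object below top-at-mark-start is live only if the bitmap marks it, while anything at or above TAMS is implicitly live because it was allocated after marking started. A hedged sketch of that rule (obj_is_live and the bitmap callback are illustrative, not G1 API):

  #include <cstdint>
  #include <functional>

  typedef uintptr_t Addr;      // hypothetical heap address

  // G1's liveness rule per bitmap (sketch): at or above TAMS an object
  // is implicitly live; below TAMS it must carry an explicit mark.
  bool obj_is_live(Addr obj, Addr tams,
                   const std::function<bool(Addr)>& is_marked) {
    return obj >= tams         // allocated since mark start: implicitly live
        || is_marked(obj);     // below TAMS: needs a bitmap mark
  }

Under this rule, setting NTAMS to bottom() (the during_conc_mark branch) makes every object in the region implicitly live on the next bitmap, while setting PTAMS to top() places all objects below PTAMS, so the self-forwarded ones must then be marked explicitly on the prev bitmap, exactly as the comments in the code state.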