< prev index next >

src/share/vm/gc_implementation/g1/heapRegion.cpp

Print this page
rev 7793 : 8073315: Enable gcc -Wtype-limits and fix upcoming issues.


 308   _prev_marked_bytes = 0;
 309 
 310   if (during_initial_mark) {
 311     // During initial-mark, we'll also explicitly mark all objects
 312     // we find to be self-forwarded on the next bitmap. So all
 313     // objects need to be below NTAMS.
 314     _next_top_at_mark_start = top();
 315     _next_marked_bytes = 0;
 316   } else if (during_conc_mark) {
 317     // During concurrent mark, all objects in the CSet (including
 318     // the ones we find to be self-forwarded) are implicitly live.
 319     // So all objects need to be above NTAMS.
 320     _next_top_at_mark_start = bottom();
 321     _next_marked_bytes = 0;
 322   }
 323 }
 324 
 325 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
 326                                                   bool during_conc_mark,
 327                                                   size_t marked_bytes) {
 328   assert(0 <= marked_bytes && marked_bytes <= used(),
 329          err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
 330                  marked_bytes, used()));
 331   _prev_top_at_mark_start = top();
 332   _prev_marked_bytes = marked_bytes;
 333 }
 334 
 335 HeapWord*
 336 HeapRegion::object_iterate_mem_careful(MemRegion mr,
 337                                                  ObjectClosure* cl) {
 338   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 339   // We used to use "block_start_careful" here.  But we're actually happy
 340   // to update the BOT while we do this...
 341   HeapWord* cur = block_start(mr.start());
 342   mr = mr.intersection(used_region());
 343   if (mr.is_empty()) return NULL;
 344   // Otherwise, find the obj that extends onto mr.start().
 345 
 346   assert(cur <= mr.start()
 347          && (oop(cur)->klass_or_null() == NULL ||
 348              cur + oop(cur)->size() > mr.start()),
 349          "postcondition of block_start");
 350   oop obj;




 308   _prev_marked_bytes = 0;
 309 
 310   if (during_initial_mark) {
 311     // During initial-mark, we'll also explicitly mark all objects
 312     // we find to be self-forwarded on the next bitmap. So all
 313     // objects need to be below NTAMS.
 314     _next_top_at_mark_start = top();
 315     _next_marked_bytes = 0;
 316   } else if (during_conc_mark) {
 317     // During concurrent mark, all objects in the CSet (including
 318     // the ones we find to be self-forwarded) are implicitly live.
 319     // So all objects need to be above NTAMS.
 320     _next_top_at_mark_start = bottom();
 321     _next_marked_bytes = 0;
 322   }
 323 }
 324 
// Called when removal of self-forwarding pointers (evacuation-failure
// cleanup) is complete: reset the previous marking state so the whole
// region up to top() is covered, with 'marked_bytes' of it recorded as
// live. The during_* flags are unused here; the corresponding NTAMS
// adjustment presumably happened in note_self_forwarding_removal_start()
// (see the matching code above).
void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  // marked_bytes is an unsigned size_t; a "0 <= marked_bytes" check would
  // be a tautology (and a -Wtype-limits warning), so only the upper bound
  // against used() is asserted.
  assert(marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT, marked_bytes, used()));

  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}
 333 
 334 HeapWord*
 335 HeapRegion::object_iterate_mem_careful(MemRegion mr,
 336                                                  ObjectClosure* cl) {
 337   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 338   // We used to use "block_start_careful" here.  But we're actually happy
 339   // to update the BOT while we do this...
 340   HeapWord* cur = block_start(mr.start());
 341   mr = mr.intersection(used_region());
 342   if (mr.is_empty()) return NULL;
 343   // Otherwise, find the obj that extends onto mr.start().
 344 
 345   assert(cur <= mr.start()
 346          && (oop(cur)->klass_or_null() == NULL ||
 347              cur + oop(cur)->size() > mr.start()),
 348          "postcondition of block_start");
 349   oop obj;


< prev index next >