
src/share/vm/gc/g1/heapRegion.cpp

rev 8978 : imported patch remove_err_msg
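
The hunks below appear twice: first the pre-patch version of heapRegion.cpp, then the patched one. The only change is that assertion messages no longer go through an err_msg() wrapper; the format string and its arguments are passed straight to the assert/guarantee macros, which evidently accept variadic arguments after this patch. A minimal, self-contained sketch of that pattern is shown here; the my_assert macro is a hypothetical stand-in for illustration, not HotSpot's actual debug.hpp definition.

#include <cstdio>
#include <cstdlib>

// Old style (removed by this patch): the message had to be built
// eagerly with err_msg("fmt", args) before being handed to assert().
//
// New style: the macro itself is variadic and forwards the format
// string plus arguments, so callers simply drop the err_msg() wrapper.
// NOTE: my_assert is an assumed, simplified macro for demonstration only.
#define my_assert(cond, ...)                                        \
  do {                                                              \
    if (!(cond)) {                                                  \
      std::fprintf(stderr, "assert(%s) failed: ", #cond);           \
      std::fprintf(stderr, __VA_ARGS__);                            \
      std::fprintf(stderr, "\n");                                   \
      std::abort();                                                 \
    }                                                               \
  } while (0)

int main() {
  unsigned region_index = 42;
  // Mirrors the shape of the patched call site in hr_clear():
  // the format arguments follow the message directly, no err_msg().
  my_assert(region_index != 0,
            "Should not clear heap region %u in the collection set",
            region_index);
  return 0;
}
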


 148   guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
 149 
 150   guarantee(CardsPerRegion == 0, "we should only set it once");
 151   CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
 152 }
 153 
 154 void HeapRegion::reset_after_compaction() {
 155   G1OffsetTableContigSpace::reset_after_compaction();
 156   // After a compaction the mark bitmap is invalid, so we must
 157   // treat all objects as being inside the unmarked area.
 158   zero_marked_bytes();
 159   init_top_at_mark_start();
 160 }
 161 
 162 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
 163   assert(_humongous_start_region == NULL,
 164          "we should have already filtered out humongous regions");
 165   assert(_end == orig_end(),
 166          "we should have already filtered out humongous regions");
 167   assert(!in_collection_set(),
 168          err_msg("Should not clear heap region %u in the collection set", hrm_index()));
 169 
 170   set_allocation_context(AllocationContext::system());
 171   set_young_index_in_cset(-1);
 172   uninstall_surv_rate_group();
 173   set_free();
 174   reset_pre_dummy_top();
 175 
 176   if (!par) {
 177     // If this is parallel, this will be done later.
 178     HeapRegionRemSet* hrrs = rem_set();
 179     if (locked) {
 180       hrrs->clear_locked();
 181     } else {
 182       hrrs->clear();
 183     }
 184   }
 185   zero_marked_bytes();
 186 
 187   _offsets.resize(HeapRegion::GrainWords);
 188   init_top_at_mark_start();


 275      _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
 276     _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
 277     _predicted_bytes_to_copy(0)
 278 {
 279   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
 280   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 281 
 282   initialize(mr);
 283 }
 284 
 285 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
 286   assert(_rem_set->is_empty(), "Remembered set must be empty");
 287 
 288   G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
 289 
 290   hr_clear(false /*par*/, false /*clear_space*/);
 291   set_top(bottom());
 292   record_timestamp();
 293 
 294   assert(mr.end() == orig_end(),
 295          err_msg("Given region end address " PTR_FORMAT " should match exactly "
 296                  "bottom plus one region size, i.e. " PTR_FORMAT,
 297                  p2i(mr.end()), p2i(orig_end())));
 298 }
 299 
 300 CompactibleSpace* HeapRegion::next_compaction_space() const {
 301   return G1CollectedHeap::heap()->next_compaction_region(this);
 302 }
 303 
 304 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 305                                                     bool during_conc_mark) {
 306   // We always recreate the prev marking info and we'll explicitly
 307   // mark all objects we find to be self-forwarded on the prev
 308   // bitmap. So all objects need to be below PTAMS.
 309   _prev_marked_bytes = 0;
 310 
 311   if (during_initial_mark) {
 312     // During initial-mark, we'll also explicitly mark all objects
 313     // we find to be self-forwarded on the next bitmap. So all
 314     // objects need to be below NTAMS.
 315     _next_top_at_mark_start = top();
 316     _next_marked_bytes = 0;
 317   } else if (during_conc_mark) {
 318     // During concurrent mark, all objects in the CSet (including
 319     // the ones we find to be self-forwarded) are implicitly live.
 320     // So all objects need to be above NTAMS.
 321     _next_top_at_mark_start = bottom();
 322     _next_marked_bytes = 0;
 323   }
 324 }
 325 
 326 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
 327                                                   bool during_conc_mark,
 328                                                   size_t marked_bytes) {
 329   assert(marked_bytes <= used(),
 330          err_msg("marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used()));
 331   _prev_top_at_mark_start = top();
 332   _prev_marked_bytes = marked_bytes;
 333 }
 334 
 335 HeapWord*
 336 HeapRegion::object_iterate_mem_careful(MemRegion mr,
 337                                                  ObjectClosure* cl) {
 338   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 339   // We used to use "block_start_careful" here.  But we're actually happy
 340   // to update the BOT while we do this...
 341   HeapWord* cur = block_start(mr.start());
 342   mr = mr.intersection(used_region());
 343   if (mr.is_empty()) return NULL;
 344   // Otherwise, find the obj that extends onto mr.start().
 345 
 346   assert(cur <= mr.start()
 347          && (oop(cur)->klass_or_null() == NULL ||
 348              cur + oop(cur)->size() > mr.start()),
 349          "postcondition of block_start");
 350   oop obj;




 148   guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
 149 
 150   guarantee(CardsPerRegion == 0, "we should only set it once");
 151   CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
 152 }
 153 
 154 void HeapRegion::reset_after_compaction() {
 155   G1OffsetTableContigSpace::reset_after_compaction();
 156   // After a compaction the mark bitmap is invalid, so we must
 157   // treat all objects as being inside the unmarked area.
 158   zero_marked_bytes();
 159   init_top_at_mark_start();
 160 }
 161 
 162 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
 163   assert(_humongous_start_region == NULL,
 164          "we should have already filtered out humongous regions");
 165   assert(_end == orig_end(),
 166          "we should have already filtered out humongous regions");
 167   assert(!in_collection_set(),
 168          "Should not clear heap region %u in the collection set", hrm_index());
 169 
 170   set_allocation_context(AllocationContext::system());
 171   set_young_index_in_cset(-1);
 172   uninstall_surv_rate_group();
 173   set_free();
 174   reset_pre_dummy_top();
 175 
 176   if (!par) {
 177     // If this is parallel, this will be done later.
 178     HeapRegionRemSet* hrrs = rem_set();
 179     if (locked) {
 180       hrrs->clear_locked();
 181     } else {
 182       hrrs->clear();
 183     }
 184   }
 185   zero_marked_bytes();
 186 
 187   _offsets.resize(HeapRegion::GrainWords);
 188   init_top_at_mark_start();


 275      _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
 276     _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
 277     _predicted_bytes_to_copy(0)
 278 {
 279   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
 280   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 281 
 282   initialize(mr);
 283 }
 284 
 285 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
 286   assert(_rem_set->is_empty(), "Remembered set must be empty");
 287 
 288   G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
 289 
 290   hr_clear(false /*par*/, false /*clear_space*/);
 291   set_top(bottom());
 292   record_timestamp();
 293 
 294   assert(mr.end() == orig_end(),
 295          "Given region end address " PTR_FORMAT " should match exactly "
 296          "bottom plus one region size, i.e. " PTR_FORMAT,
 297          p2i(mr.end()), p2i(orig_end()));
 298 }
 299 
 300 CompactibleSpace* HeapRegion::next_compaction_space() const {
 301   return G1CollectedHeap::heap()->next_compaction_region(this);
 302 }
 303 
 304 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 305                                                     bool during_conc_mark) {
 306   // We always recreate the prev marking info and we'll explicitly
 307   // mark all objects we find to be self-forwarded on the prev
 308   // bitmap. So all objects need to be below PTAMS.
 309   _prev_marked_bytes = 0;
 310 
 311   if (during_initial_mark) {
 312     // During initial-mark, we'll also explicitly mark all objects
 313     // we find to be self-forwarded on the next bitmap. So all
 314     // objects need to be below NTAMS.
 315     _next_top_at_mark_start = top();
 316     _next_marked_bytes = 0;
 317   } else if (during_conc_mark) {
 318     // During concurrent mark, all objects in the CSet (including
 319     // the ones we find to be self-forwarded) are implicitly live.
 320     // So all objects need to be above NTAMS.
 321     _next_top_at_mark_start = bottom();
 322     _next_marked_bytes = 0;
 323   }
 324 }
 325 
 326 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
 327                                                   bool during_conc_mark,
 328                                                   size_t marked_bytes) {
 329   assert(marked_bytes <= used(),
 330          "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
 331   _prev_top_at_mark_start = top();
 332   _prev_marked_bytes = marked_bytes;
 333 }
 334 
 335 HeapWord*
 336 HeapRegion::object_iterate_mem_careful(MemRegion mr,
 337                                                  ObjectClosure* cl) {
 338   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 339   // We used to use "block_start_careful" here.  But we're actually happy
 340   // to update the BOT while we do this...
 341   HeapWord* cur = block_start(mr.start());
 342   mr = mr.intersection(used_region());
 343   if (mr.is_empty()) return NULL;
 344   // Otherwise, find the obj that extends onto mr.start().
 345 
 346   assert(cur <= mr.start()
 347          && (oop(cur)->klass_or_null() == NULL ||
 348              cur + oop(cur)->size() > mr.start()),
 349          "postcondition of block_start");
 350   oop obj;

