< prev index next >

src/share/vm/gc/g1/heapRegion.cpp

Print this page
rev 12056 : [mq]: simplify


 335     _next_top_at_mark_start = top();
 336     _next_marked_bytes = 0;
 337   } else if (during_conc_mark) {
 338     // During concurrent mark, all objects in the CSet (including
 339     // the ones we find to be self-forwarded) are implicitly live.
 340     // So all objects need to be above NTAMS.
 341     _next_top_at_mark_start = bottom();
 342     _next_marked_bytes = 0;
 343   }
 344 }
 345 
// Finish recording the removal of self-forwarding pointers after an
// evacuation failure: everything currently allocated in the region
// (up to top()) becomes the "previous" marking snapshot, of which
// 'marked_bytes' are live.
// 'during_initial_mark' and 'during_conc_mark' are not used in this
// function; presumably they mirror the parameters of the matching
// note_self_forwarding_removal_start() — confirm against callers.
void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  // Live bytes can never exceed the bytes actually allocated in the region.
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}
 354 
// Carefully iterate over the objects on this region that intersect the
// card-sized memory region 'mr', applying 'cl' (which filters out
// references pointing back into this region) to each live object found.
//
// Returns NULL when the card has been fully processed (or there was
// nothing to do: empty intersection, or a young region with
// 'filter_young' set). Returns the address of an unparseable point if
// one is encountered (an object whose klass word is not yet visible);
// the caller presumably re-processes the card later — confirm at call
// sites.
//
// 'card_ptr' must be non-NULL exactly when 'filter_young' is true; in
// that case the card is cleaned here once the region is known not to
// be young.
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  // First, walk forward block by block from the BOT-provided start
  // until we reach the block that covers 'start'.
  HeapWord* next = cur;
  do {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  } while (next <= start);

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");

  // Second, iterate the objects overlapping [start, end), applying
  // the closure to each live one.
  do {
    obj = oop(cur);
    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Advance the current pointer. "obj" still points to the object to iterate.
    cur = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      // Non-objArrays are sometimes marked imprecise at the object start. We
      // always need to iterate over them in full.
      // We only iterate over object arrays in full if they are completely contained
      // in the memory region.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return NULL;
}
 457 
 458 // Code roots support
 459 
 460 void HeapRegion::add_strong_code_root(nmethod* nm) {
 461   HeapRegionRemSet* hrrs = rem_set();
 462   hrrs->add_strong_code_root(nm);
 463 }
 464 
 465 void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
 466   assert_locked_or_safepoint(CodeCache_lock);
 467   HeapRegionRemSet* hrrs = rem_set();
 468   hrrs->add_strong_code_root_locked(nm);
 469 }
 470 
 471 void HeapRegion::remove_strong_code_root(nmethod* nm) {
 472   HeapRegionRemSet* hrrs = rem_set();
 473   hrrs->remove_strong_code_root(nm);
 474 }
 475 




 335     _next_top_at_mark_start = top();
 336     _next_marked_bytes = 0;
 337   } else if (during_conc_mark) {
 338     // During concurrent mark, all objects in the CSet (including
 339     // the ones we find to be self-forwarded) are implicitly live.
 340     // So all objects need to be above NTAMS.
 341     _next_top_at_mark_start = bottom();
 342     _next_marked_bytes = 0;
 343   }
 344 }
 345 
// Finish recording the removal of self-forwarding pointers after an
// evacuation failure: everything currently allocated in the region
// (up to top()) becomes the "previous" marking snapshot, of which
// 'marked_bytes' are live.
// 'during_initial_mark' and 'during_conc_mark' are not used in this
// function; presumably they mirror the parameters of the matching
// note_self_forwarding_removal_start() — confirm against callers.
void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  // Live bytes can never exceed the bytes actually allocated in the region.
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}
 354 
// Carefully iterate over the objects on this region that intersect the
// card-sized memory region 'mr', applying 'cl' (which filters out
// references pointing back into this region) to each live object found.
//
// Returns true when the card has been fully processed, or when there is
// nothing to do (empty intersection with the allocated part of the
// region, or a young region). Returns false if an unparseable point is
// encountered (an object whose klass word is not yet visible), which
// the asserts show can only happen outside an active GC; the caller
// presumably re-processes the card later — confirm at call sites.
//
// 'card_ptr' must be non-NULL; the card is cleaned here once the
// region is known not to be young.
bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
                                                  FilterOutOfRegionClosure* cl,
                                                  jbyte* card_ptr) {
  assert(card_ptr != NULL, "pre-condition");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return true;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young()) {
    return true;
  }

  // We can only clean the card here, after we make the decision that
  // the card is not young.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // Update BOT as needed while finding start of (potential) object.
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  // First, walk forward block by block from the BOT-provided start
  // until we reach the block that covers 'start'.
  HeapWord* next = cur;
  do {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      assert(!g1h->is_gc_active(),
             "Unparsable heap during GC at " PTR_FORMAT, p2i(cur));
      return false;
    }
    // Otherwise...
    next = cur + block_size(cur);
  } while (next <= start);

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");

  // Second, iterate the objects overlapping [start, end), applying
  // the closure to each live one.
  do {
    obj = oop(cur);
    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      assert(!g1h->is_gc_active(),
             "Unparsable heap during GC at " PTR_FORMAT, p2i(cur));
      return false;
    }

    // Advance the current pointer. "obj" still points to the object to iterate.
    cur = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      // Non-objArrays are sometimes marked imprecise at the object start. We
      // always need to iterate over them in full.
      // We only iterate over object arrays in full if they are completely contained
      // in the memory region.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return true;
}
 446 
 447 // Code roots support
 448 
 449 void HeapRegion::add_strong_code_root(nmethod* nm) {
 450   HeapRegionRemSet* hrrs = rem_set();
 451   hrrs->add_strong_code_root(nm);
 452 }
 453 
 454 void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
 455   assert_locked_or_safepoint(CodeCache_lock);
 456   HeapRegionRemSet* hrrs = rem_set();
 457   hrrs->add_strong_code_root_locked(nm);
 458 }
 459 
 460 void HeapRegion::remove_strong_code_root(nmethod* nm) {
 461   HeapRegionRemSet* hrrs = rem_set();
 462   hrrs->remove_strong_code_root(nm);
 463 }
 464 


< prev index next >