src/share/vm/gc_implementation/g1/heapRegion.cpp

rev 9374 : 8259659: Missing memory fences between memory allocation and refinement
Summary: Refactored to have needed barrier
Reviewed-by: tschatzl, ehelin


 410 // Humongous objects are allocated directly in the old-gen.  Need
 411 // special handling for concurrent processing encountering an
 412 // in-progress allocation.
 413 static bool do_oops_on_card_in_humongous(MemRegion mr,
 414                                          FilterOutOfRegionClosure* cl,
 415                                          HeapRegion* hr,
 416                                          G1CollectedHeap* g1h) {
 417   assert(hr->isHumongous(), "precondition");
 418   HeapRegion* sr = hr->humongous_start_region();
 419   oop obj = oop(sr->bottom());
 420 
 421   // If concurrent and klass_or_null is NULL, then space has been
 422   // allocated but the object has not yet been published by setting
 423   // the klass.  That can only happen if the card is stale.  However,
 424   // we've already set the card clean, so we must return failure,
 425   // since the allocating thread could have performed a write to the
 426   // card that might be missed otherwise.
 427   if (!g1h->is_gc_active() && (obj->klass_or_null_acquire() == NULL)) {
 428     return false;
 429   }
 430 
 431   // Only filler objects follow a humongous object in the containing
 432   // regions, and we can ignore those.  So only process the one
 433   // humongous object.
 434   if (!g1h->is_obj_dead(obj, sr)) {
 435     if (obj->is_objArray() || (sr->bottom() < mr.start())) {
 436       // objArrays are always marked precisely, so limit processing
 437       // with mr.  Non-objArrays might be precisely marked, and since
 438       // it's humongous it's worthwhile avoiding full processing.
 439       // However, the card could be stale and only cover filler
 440       // objects.  That should be rare, so not worth checking for;
 441       // instead let it fall out from the bounded iteration.
 442       obj->oop_iterate(cl, mr);
 443     } else {
 444       // If obj is not an objArray and mr contains the start of the
 445       // obj, then this could be an imprecise mark, and we need to
 446       // process the entire object.
 447       obj->oop_iterate(cl);
 448     }
 449   }
 450   return true;
 451 }
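
The klass_or_null_acquire() check above relies on the allocation path publishing the object with a matching release-store of its klass. A minimal sketch of that pairing, using C++11 atomics in place of HotSpot's OrderAccess and oopDesc accessors; every name below is an illustrative stand-in, not HotSpot code:

#include <atomic>
#include <cstddef>

struct FakeKlass;  // stand-in for Klass*

struct FakeObj {
  std::atomic<FakeKlass*> _klass{nullptr};  // null until the object is published
  size_t _payload;                          // stand-in for the object body
};

// Allocator side: initialize the body first, then release-store the klass,
// in the spirit of oopDesc::release_set_klass().  The release ordering makes
// the body writes visible to any thread that acquire-loads a non-null klass.
void publish(FakeObj* obj, FakeKlass* k, size_t payload) {
  obj->_payload = payload;
  obj->_klass.store(k, std::memory_order_release);
}

// Refinement side: acquire-load the klass, in the spirit of
// klass_or_null_acquire().  A null result means allocation is still in
// progress, so the caller must treat the card as stale and report failure.
bool try_process(FakeObj* obj) {
  if (obj->_klass.load(std::memory_order_acquire) == nullptr) {
    return false;  // not yet published; the card will be redirtied and retried
  }
  // Safe to read obj->_payload here: this acquire pairs with the release above.
  return true;
}
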
 452 
 453 bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
 454                                                   FilterOutOfRegionClosure* cl,
 455                                                   jbyte* card_ptr) {
 456   assert(card_ptr != NULL, "precondition");
 457   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 458 
 459   // If we're within a stop-world GC, then we might look at a card in a
 460   // GC alloc region that extends onto a GC LAB, which may not be
 461   // parseable.  Stop processing such a card at the "scan_top" of the region.
 462   if (g1h->is_gc_active()) {
 463     mr = mr.intersection(MemRegion(bottom(), scan_top()));
 464   } else {
 465     mr = mr.intersection(used_region());
 466   }
 467   if (mr.is_empty()) {
 468     return true;
 469   }
 470 
 471   // The intersection of the incoming mr (for the card) and the
 472   // allocated part of the region is non-empty. This implies that
 473   // we have actually allocated into this region. The code in
 474   // G1CollectedHeap.cpp that allocates a new region sets the
 475   // is_young tag on the region before allocating.  Thus we can
 476   // safely tell whether this region is young.
 477   if (is_young()) {
 478     return true;
 479   }
 480 
 481   // We can only clean the card here, after we make the decision that
 482   // the card is not young.
 483   *card_ptr = CardTableModRefBS::clean_card_val();
 484   // We must complete this write before we do any of the reads below.
 485   OrderAccess::storeload();
 486 
 487   // Special handling for humongous regions.
 488   if (isHumongous()) {
 489     return do_oops_on_card_in_humongous(mr, cl, this, g1h);
 490   }
 491 
 492   // During GC we limit mr by scan_top. So we never get here with an
 493   // mr covering objects allocated during GC.  Non-humongous objects
 494   // are only allocated in the old-gen during GC.  So the parts of the
 495   // heap that may be examined here are always parsable; there's no
 496   // need to use klass_or_null here to detect in-progress allocations.
 497 
 498   // Cache the boundaries of the memory region in some const locals
 499   HeapWord* const start = mr.start();
 500   HeapWord* const end = mr.end();
 501 
 502   // Find the obj that extends onto mr.start().
 503   // Update BOT as needed while finding start of (possibly dead)
 504   // object containing the start of the region.
 505   HeapWord* cur = block_start(start);
 506 
 507 #ifdef ASSERT
 508   {
 509     assert(cur <= start,
 510            err_msg("cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start)));
 511     HeapWord* next = cur + block_size(cur);
 512     assert(start < next,
 513            err_msg("start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next)));
 514   }
 515 #endif
 516 
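
This pre-patch version cleans the card and issues the StoreLoad fence inside the function itself (lines 483-485), before any of the heap reads. A minimal two-thread sketch of why that fence is needed, again with C++11 atomics standing in for OrderAccess; the single heap slot and all names are illustrative assumptions:

#include <atomic>

std::atomic<unsigned char> card{1 /* dirty */};  // one card table entry
std::atomic<void*>         slot{nullptr};        // the heap word it covers

// Refinement thread: store "clean" first, fence, then read the heap.  The
// seq_cst fence plays the role of OrderAccess::storeload().  If the refiner
// misses a concurrent write to the slot, the clean store is then ordered
// before the mutator's re-dirtying of the card, so a dirty card survives
// and the slot is guaranteed to be rescanned later.
void* refine() {
  card.store(0 /* clean */, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad
  return slot.load(std::memory_order_relaxed);
}

// Mutator thread (post-write barrier): write the reference, fence, then
// dirty the card.  Without the fences, the two reorderings could combine
// into the lost-update case: card cleaned, stale value read, no re-dirty.
void mutate(void* p) {
  slot.store(p, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
  card.store(1 /* dirty */, std::memory_order_relaxed);
}

The patched version of the file follows; as the dropped card_ptr parameter shows, the region trimming, card cleaning, and fence move out of this function and into its caller.
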
 410 // Humongous objects are allocated directly in the old-gen.  Need
 411 // special handling for concurrent processing encountering an
 412 // in-progress allocation.
 413 static bool do_oops_on_card_in_humongous(MemRegion mr,
 414                                          FilterOutOfRegionClosure* cl,
 415                                          HeapRegion* hr,
 416                                          G1CollectedHeap* g1h) {
 417   assert(hr->isHumongous(), "precondition");
 418   HeapRegion* sr = hr->humongous_start_region();
 419   oop obj = oop(sr->bottom());
 420 
 421   // If concurrent and klass_or_null is NULL, then space has been
 422   // allocated but the object has not yet been published by setting
 423   // the klass.  That can only happen if the card is stale.  However,
 424   // we've already set the card clean, so we must return failure,
 425   // since the allocating thread could have performed a write to the
 426   // card that might be missed otherwise.
 427   if (!g1h->is_gc_active() && (obj->klass_or_null_acquire() == NULL)) {
 428     return false;
 429   }
 430 
 431   // We have a well-formed humongous object at the start of sr.
 432   // Only filler objects follow a humongous object in the containing
 433   // regions, and we can ignore those.  So only process the one
 434   // humongous object.
 435   if (!g1h->is_obj_dead(obj, sr)) {
 436     if (obj->is_objArray() || (sr->bottom() < mr.start())) {
 437       // objArrays are always marked precisely, so limit processing
 438       // with mr.  Non-objArrays might be precisely marked, and since
 439       // it's humongous it's worthwhile avoiding full processing.
 440       // However, the card could be stale and only cover filler
 441       // objects.  That should be rare, so not worth checking for;
 442       // instead let it fall out from the bounded iteration.
 443       obj->oop_iterate(cl, mr);
 444     } else {
 445       // If obj is not an objArray and mr contains the start of the
 446       // obj, then this could be an imprecise mark, and we need to
 447       // process the entire object.
 448       obj->oop_iterate(cl);
 449     }
 450   }
 451   return true;
 452 }
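
The precise/imprecise distinction above decides whether oop_iterate() is bounded by mr. A sketch of the bounded-iteration idea for an object array, with hypothetical stand-in types; the real objArray code derives the element range from the array layout rather than clamping raw pointers:

#include <cstddef>

struct OopClosureStub {
  size_t visited = 0;
  void do_oop(void** /*p*/) { ++visited; }  // count visited slots, for illustration
};

// Visit only the array slots that fall inside [mr_start, mr_end), so the
// work for one card on a humongous objArray is proportional to the card,
// not to the whole array.  Mirrors the effect of obj->oop_iterate(cl, mr).
void iterate_array_bounded(void** base, size_t len,
                           void** mr_start, void** mr_end,
                           OopClosureStub& cl) {
  void** from  = (base > mr_start) ? base : mr_start;
  void** limit = base + len;
  void** to    = (limit < mr_end) ? limit : mr_end;
  for (void** p = from; p < to; ++p) {
    cl.do_oop(p);  // slots outside the card-sized mr are never touched
  }
}
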
 453 
 454 bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
 455                                                   FilterOutOfRegionClosure* cl) {
 456   assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
 457   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 458 
 459   // Special handling for humongous regions.
 460   if (isHumongous()) {
 461     return do_oops_on_card_in_humongous(mr, cl, this, g1h);
 462   }
 463   assert(is_old(), "precondition");
 464 
 465   // Because mr has been trimmed to what's been allocated in this
 466   // region, the parts of the heap that are examined here are always
 467   // parsable; there's no need to use klass_or_null to detect
 468   // in-progress allocation.
 469 
 470   // Cache the boundaries of the memory region in some const locals
 471   HeapWord* const start = mr.start();
 472   HeapWord* const end = mr.end();
 473 
 474   // Find the obj that extends onto mr.start().
 475   // Update BOT as needed while finding start of (possibly dead)
 476   // object containing the start of the region.
 477   HeapWord* cur = block_start(start);
 478 
 479 #ifdef ASSERT
 480   {
 481     assert(cur <= start,
 482            err_msg("cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start)));
 483     HeapWord* next = cur + block_size(cur);
 484     assert(start < next,
 485            err_msg("start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next)));
 486   }
 487 #endif
 488 
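
With card_ptr gone from the signature, the card cleaning and the StoreLoad fence must now happen in the refinement path before this function is called. A hypothetical caller-side shape that preserves the required ordering; this is a sketch of the contract, not the actual G1RemSet code:

// Hypothetical caller: clean, fence, then iterate.
bool refine_card_sketch(jbyte* card_ptr, MemRegion scan_region,
                        HeapRegion* r, FilterOutOfRegionClosure* cl) {
  *card_ptr = CardTableModRefBS::clean_card_val();
  // Make the clean visible before any heap reads done by the iteration,
  // so a racing mutator write cannot be missed without leaving a dirty
  // card behind to force re-refinement.
  OrderAccess::storeload();
  return r->oops_on_card_seq_iterate_careful(scan_region, cl);
}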