< prev index next >

src/share/vm/gc/g1/heapRegion.cpp

Print this page
rev 8802 : G1 performance improvements: card batching, joining, sorting, prefetching and write barrier fence elision and simplification based on a global synchronization using handshakes piggybacking on thread-local safepoints.


 344   // Otherwise, find the obj that extends onto mr.start().
 345 
 346   assert(cur <= mr.start()
 347          && (oop(cur)->klass_or_null() == NULL ||
 348              cur + oop(cur)->size() > mr.start()),
 349          "postcondition of block_start");
 350   oop obj;
 351   while (cur < mr.end()) {
 352     obj = oop(cur);
 353     if (obj->klass_or_null() == NULL) {
 354       // Ran into an unparseable point.
 355       return cur;
 356     } else if (!g1h->is_obj_dead(obj)) {
 357       cl->do_object(obj);
 358     }
 359     cur += block_size(cur);
 360   }
 361   return NULL;
 362 }
 363 
 364 HeapWord*
 365 HeapRegion::
 366 oops_on_card_seq_iterate_careful(MemRegion mr,
 367                                  FilterOutOfRegionClosure* cl,
 368                                  bool filter_young,
 369                                  jbyte* card_ptr) {
 370   // Currently, we should only have to clean the card if filter_young
 371   // is true and vice versa.
 372   if (filter_young) {
 373     assert(card_ptr != NULL, "pre-condition");
 374   } else {
 375     assert(card_ptr == NULL, "pre-condition");
 376   }
 377   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 378 
 379   // If we're within a stop-world GC, then we might look at a card in a
 380   // GC alloc region that extends onto a GC LAB, which may not be
 381   // parseable.  Stop such at the "scan_top" of the region.
 382   if (g1h->is_gc_active()) {
 383     mr = mr.intersection(MemRegion(bottom(), scan_top()));
 384   } else {
 385     mr = mr.intersection(used_region());
 386   }
 387   if (mr.is_empty()) return NULL;
 388   // Otherwise, find the obj that extends onto mr.start().
 389 
 390   // The intersection of the incoming mr (for the card) and the
 391   // allocated part of the region is non-empty. This implies that
 392   // we have actually allocated into this region. The code in
 393   // G1CollectedHeap.cpp that allocates a new region sets the
 394   // is_young tag on the region before allocating. Thus we
 395   // safely know if this region is young.
 396   if (is_young() && filter_young) {
 397     return NULL;
 398   }
 399 
 400   assert(!is_young(), "check value of filter_young");
 401 
 402   // We can only clean the card here, after we make the decision that
 403   // the card is not young. And we only clean the card if we have been
 404   // asked to (i.e., card_ptr != NULL).
 405   if (card_ptr != NULL) {
 406     *card_ptr = CardTableModRefBS::clean_card_val();
 407     // We must complete this write before we do any of the reads below.
 408     OrderAccess::storeload();
 409   }
 410 








 411   // Cache the boundaries of the memory region in some const locals
 412   HeapWord* const start = mr.start();
 413   HeapWord* const end = mr.end();
 414 


 415   // We used to use "block_start_careful" here.  But we're actually happy
 416   // to update the BOT while we do this...
 417   HeapWord* cur = block_start(start);
 418   assert(cur <= start, "Postcondition");
 419 
 420   oop obj;
 421 
 422   HeapWord* next = cur;
 423   do {
 424     cur = next;
 425     obj = oop(cur);
 426     if (obj->klass_or_null() == NULL) {
 427       // Ran into an unparseable point.
 428       return cur;
 429     }
 430     // Otherwise...
 431     next = cur + block_size(cur);
 432   } while (next <= start);
 433 
 434   // If we finish the above loop...We have a parseable object that
 435   // begins on or before the start of the memory region, and ends
 436   // inside or spans the entire region.
 437   assert(cur <= start, "Loop postcondition");


 447 
 448     // Advance the current pointer. "obj" still points to the object to iterate.
 449     cur = cur + block_size(cur);
 450 
 451     if (!g1h->is_obj_dead(obj)) {
 452       // Non-objArrays are sometimes marked imprecise at the object start. We
 453       // always need to iterate over them in full.
 454       // We only iterate over object arrays in full if they are completely contained
 455       // in the memory region.
 456       if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
 457         obj->oop_iterate(cl);
 458       } else {
 459         obj->oop_iterate(cl, mr);
 460       }
 461     }
 462   } while (cur < end);
 463 
 464   return NULL;
 465 }
 466 











 467 // Code roots support
 468 
     // Thin wrappers: strong-code-root bookkeeping for this region is
     // delegated to its remembered set (HeapRegionRemSet).
 469 void HeapRegion::add_strong_code_root(nmethod* nm) {
 470   HeapRegionRemSet* hrrs = rem_set();
 471   hrrs->add_strong_code_root(nm);
 472 }
 473 
 474 void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
     // Caller must hold the CodeCache_lock or be at a safepoint.
 475   assert_locked_or_safepoint(CodeCache_lock);
 476   HeapRegionRemSet* hrrs = rem_set();
 477   hrrs->add_strong_code_root_locked(nm);
 478 }
 479 
 480 void HeapRegion::remove_strong_code_root(nmethod* nm) {
 481   HeapRegionRemSet* hrrs = rem_set();
 482   hrrs->remove_strong_code_root(nm);
 483 }
 484 
 485 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
 486   HeapRegionRemSet* hrrs = rem_set();


1012   }
1013 }
1014 
1015 G1OffsetTableContigSpace::
1016 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
1017                          MemRegion mr) :
1018   _offsets(sharedOffsetArray, mr),
1019   _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
1020   _gc_time_stamp(0)
1021 {
     // Wire this space's view of the shared block-offset table back to the
     // space itself so BOT lookups can resolve addresses within it.
1022   _offsets.set_space(this);
1023 }
1024 
1025 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
     // Reset allocation state for (re)use of this space: _top and _scan_top
     // restart at bottom(), the saved mark is cleared, and the block-offset
     // table is reset.
1026   CompactibleSpace::initialize(mr, clear_space, mangle_space);
1027   _top = bottom();
1028   _scan_top = bottom();
1029   set_saved_mark_word(NULL);
1030   reset_bot();
1031 }
1032 


 344   // Otherwise, find the obj that extends onto mr.start().
 345 
 346   assert(cur <= mr.start()
 347          && (oop(cur)->klass_or_null() == NULL ||
 348              cur + oop(cur)->size() > mr.start()),
 349          "postcondition of block_start");
 350   oop obj;
 351   while (cur < mr.end()) {
 352     obj = oop(cur);
 353     if (obj->klass_or_null() == NULL) {
 354       // Ran into an unparseable point.
 355       return cur;
 356     } else if (!g1h->is_obj_dead(obj)) {
 357       cl->do_object(obj);
 358     }
 359     cur += block_size(cur);
 360   }
 361   return NULL;
 362 }
 363 
 364 
 365 bool HeapRegion::clean_card(MemRegion& mr,


 366                             bool filter_young,
 367                             jbyte* &card_ptr) {
     // Clip mr (the memory spanned by a card) to the parseable, allocated
     // part of this region and, when card_ptr != NULL, reset the card to
     // clean_card_val().  Returns false when there is nothing to scan
     // (empty intersection, or a young region with filter_young set);
     // mr is updated in place for the caller.
     // NOTE(review): the storeload() that previously followed the card
     // write is not issued here; the caller is responsible for ordering
     // the clean against the subsequent heap reads.
 368   // Currently, we should only have to clean the card if filter_young
 369   // is true and vice versa.
 370   if (filter_young) {
 371     assert(card_ptr != NULL, "pre-condition");
 372   } else {
 373     assert(card_ptr == NULL, "pre-condition");
 374   }
 375   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 376 
 377   // If we're within a stop-world GC, then we might look at a card in a
 378   // GC alloc region that extends onto a GC LAB, which may not be
 379   // parseable.  Stop such at the "scan_top" of the region.
 380   if (g1h->is_gc_active()) {
 381     mr = mr.intersection(MemRegion(bottom(), scan_top()));
 382   } else {
 383     mr = mr.intersection(used_region());
 384   }
 385   if (mr.is_empty()) return false;
 386   // Otherwise, find the obj that extends onto mr.start().
 387 
 388   // The intersection of the incoming mr (for the card) and the
 389   // allocated part of the region is non-empty. This implies that
 390   // we have actually allocated into this region. The code in
 391   // G1CollectedHeap.cpp that allocates a new region sets the
 392   // is_young tag on the region before allocating. Thus we
 393   // safely know if this region is young.
 394   if (is_young() && filter_young) {
 395     return false;
 396   }
 397 
 398   assert(!is_young(), "check value of filter_young");
 399 
 400   // We can only clean the card here, after we make the decision that
 401   // the card is not young. And we only clean the card if we have been
 402   // asked to (i.e., card_ptr != NULL).
 403   if (card_ptr != NULL) {
 404     *card_ptr = CardTableModRefBS::clean_card_val();


 405   }
 406 
 407   return true;
 408 }
 409 
 410 HeapWord* HeapRegion::process_oops_on_card(MemRegion mr,
 411                                            FilterOutOfRegionClosure *cl,
 412                                            jbyte *card_ptr) {
 413   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 414   G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
 415   // Cache the boundaries of the memory region in some const locals
 416   HeapWord* const start = mr.start();
 417   HeapWord* const end = mr.end();
 418 
 419   HeapWord* cur;
 420 
 421   // We used to use "block_start_careful" here.  But we're actually happy
 422   // to update the BOT while we do this...
 423   cur = block_start(start);
 424   assert(cur <= start, "Postcondition");
 425 
 426   oop obj;
 427 
 428   HeapWord* next = cur;
 429   do {
 430     cur = next;
 431     obj = oop(cur);
 432     if (obj->klass_or_null() == NULL) {
 433       // Ran into an unparseable point.
 434       return cur;
 435     }
 436     // Otherwise...
 437     next = cur + block_size(cur);
 438   } while (next <= start);
 439 
 440   // If we finish the above loop...We have a parseable object that
 441   // begins on or before the start of the memory region, and ends
 442   // inside or spans the entire region.
 443   assert(cur <= start, "Loop postcondition");


 453 
 454     // Advance the current pointer. "obj" still points to the object to iterate.
 455     cur = cur + block_size(cur);
 456 
 457     if (!g1h->is_obj_dead(obj)) {
 458       // Non-objArrays are sometimes marked imprecise at the object start. We
 459       // always need to iterate over them in full.
 460       // We only iterate over object arrays in full if they are completely contained
 461       // in the memory region.
 462       if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
 463         obj->oop_iterate(cl);
 464       } else {
 465         obj->oop_iterate(cl, mr);
 466       }
 467     }
 468   } while (cur < end);
 469 
 470   return NULL;
 471 }
 472 
 473 HeapWord*
 474 HeapRegion::
 475 oops_on_card_seq_iterate_careful(MemRegion mr,
 476                                  FilterOutOfRegionClosure* cl,
 477                                  bool filter_young,
 478                                  jbyte* card_ptr) {
     // Clean the card and clip mr; bail out if there is nothing to scan.
 479   if (!clean_card(mr, filter_young, card_ptr)) return NULL;
     // The card-clean write must complete before any of the heap reads in
     // process_oops_on_card(), hence the StoreLoad fence whenever a card
     // was actually written (card_ptr != NULL).
 480   if (card_ptr != NULL) OrderAccess::storeload();   // serialize card cleaning
     // Returns the address at which scanning hit an unparseable point, or
     // NULL if the whole clipped region was processed.
 481   return process_oops_on_card(mr, cl, card_ptr);
 482 }
 483 
 484 // Code roots support
 485 
     // Thin wrappers: strong-code-root bookkeeping for this region is
     // delegated to its remembered set (HeapRegionRemSet).
 486 void HeapRegion::add_strong_code_root(nmethod* nm) {
 487   HeapRegionRemSet* hrrs = rem_set();
 488   hrrs->add_strong_code_root(nm);
 489 }
 490 
 491 void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
     // Caller must hold the CodeCache_lock or be at a safepoint.
 492   assert_locked_or_safepoint(CodeCache_lock);
 493   HeapRegionRemSet* hrrs = rem_set();
 494   hrrs->add_strong_code_root_locked(nm);
 495 }
 496 
 497 void HeapRegion::remove_strong_code_root(nmethod* nm) {
 498   HeapRegionRemSet* hrrs = rem_set();
 499   hrrs->remove_strong_code_root(nm);
 500 }
 501 
 502 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
 503   HeapRegionRemSet* hrrs = rem_set();


1029   }
1030 }
1031 
1032 G1OffsetTableContigSpace::
1033 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
1034                          MemRegion mr) :
1035   _offsets(sharedOffsetArray, mr),
1036   _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
1037   _gc_time_stamp(0)
1038 {
     // Wire this space's view of the shared block-offset table back to the
     // space itself so BOT lookups can resolve addresses within it.
1039   _offsets.set_space(this);
1040 }
1041 
1042 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
     // Reset allocation state for (re)use of this space: _top and _scan_top
     // restart at bottom(), the saved mark is cleared, and the block-offset
     // table is reset.
1043   CompactibleSpace::initialize(mr, clear_space, mangle_space);
1044   _top = bottom();
1045   _scan_top = bottom();
1046   set_saved_mark_word(NULL);
1047   reset_bot();
1048 }

< prev index next >