< prev index next >

src/share/vm/memory/cardTableModRefBS.cpp

Print this page




 423                   p2i(addr_for((jbyte*) _committed[ind].start())),
 424                   p2i(addr_for((jbyte*) _committed[ind].last())));
 425   }
 426   // Touch the last card of the covered region to show that it
 427   // is committed (or SEGV).
 428   debug_only((void) (*byte_for(_covered[ind].last()));)
 429   debug_only(verify_guard();)
 430 }
 431 
 432 // Note that these versions are precise!  The scanning code has to handle the
 433 // fact that the write barrier may be either precise or imprecise.
 434 
// Out-of-line slow path of the reference write barrier for a store of
// newVal into field. Simply forwards to the inlined implementation;
// 'release' presumably requests releasing (store-store ordered) card
// marking — confirm against inline_write_ref_field's declaration.
void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}
 438 
 439 
// Iterate over the non-clean cards covering mr within space sp, applying cl.
// When the heap reports a positive n_par_threads() the work is handed to the
// parallel worker routine (only available with INCLUDE_ALL_GCS); otherwise a
// serial walk over contiguous dirty card ranges is performed, clearing the
// cards as they are processed.
void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {

  if (!mr.is_empty()) {
    // Caller (process_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers().  n_par_threads can be set to 0 to
    // turn off parallelism.  For example when this code is called as
    // part of verification during root processing then n_par_threads()
    // may have been set to 0. active_workers is not overloaded with
    // the meaning that it is a switch to disable parallelism and so keeps
    // the meaning of the number of active gc workers. If parallelism has
    // not been shut off by setting n_par_threads to 0, then n_par_threads
    // should be equal to active_workers.  When a different mechanism for
    // shutting off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    int n_threads =  GenCollectedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      // Sanity: when parallelism is enabled the thread count must agree
      // with the active worker count (see comment above).
      assert(GenCollectedHeap::heap()->n_par_threads() ==
             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      // Parallel iteration is only compiled in with INCLUDE_ALL_GCS.
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // clear_cl finds contiguous dirty ranges of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}
 482 
 483 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
 484   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
 485   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
 486   jbyte* cur  = byte_for(mr.start());
 487   jbyte* last = byte_after(mr.last());
 488   while (cur < last) {
 489     *cur = dirty_card;
 490     cur++;
 491   }
 492 }
 493 
 494 void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
 495   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
 496   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );




 423                   p2i(addr_for((jbyte*) _committed[ind].start())),
 424                   p2i(addr_for((jbyte*) _committed[ind].last())));
 425   }
 426   // Touch the last card of the covered region to show that it
 427   // is committed (or SEGV).
 428   debug_only((void) (*byte_for(_covered[ind].last()));)
 429   debug_only(verify_guard();)
 430 }
 431 
 432 // Note that these versions are precise!  The scanning code has to handle the
 433 // fact that the write barrier may be either precise or imprecise.
 434 
// Out-of-line slow path of the reference write barrier for a store of
// newVal into field. Simply forwards to the inlined implementation;
// 'release' presumably requests releasing (store-store ordered) card
// marking — confirm against inline_write_ref_field's declaration.
void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}
 438 
 439 
// Iterate over the non-clean cards covering mr within space sp, applying cl.
// When n_threads is positive the work is handed to the parallel worker
// routine (only available with INCLUDE_ALL_GCS); otherwise a serial walk
// over contiguous dirty card ranges is performed, clearing the cards as
// they are processed. n_threads is supplied by the caller, which knows the
// number of active GC workers.
void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct,
                                                                 uint n_threads) {
  if (!mr.is_empty()) {
    if (n_threads > 0) {
#if INCLUDE_ALL_GCS
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      // Parallel iteration is only compiled in with INCLUDE_ALL_GCS.
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // clear_cl finds contiguous dirty ranges of cards to process and clear.

      // NOTE(review): the trailing 'false' arguments presumably select the
      // non-parallel variants of these closures — confirm against their
      // declarations.
      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), false);
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, false);

      clear_cl.do_MemRegion(mr);
    }
  }
}
 462 
 463 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
 464   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
 465   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
 466   jbyte* cur  = byte_for(mr.start());
 467   jbyte* last = byte_after(mr.last());
 468   while (cur < last) {
 469     *cur = dirty_card;
 470     cur++;
 471   }
 472 }
 473 
 474 void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
 475   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
 476   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );


< prev index next >