src/share/vm/memory/cardTableModRefBS.cpp

--- old

 442                                                                  CardTableRS* ct) {
 443   if (!mr.is_empty()) {
 444     // Caller (process_roots()) claims that all GC threads
 445     // execute this call.  With UseDynamicNumberOfGCThreads now all
 446     // active GC threads execute this call.  The number of active GC
 447     // threads needs to be passed to par_non_clean_card_iterate_work()
 448     // to get proper partitioning and termination.
 449     //
 450     // This is an example of where n_par_threads() is used instead
 451     // of workers()->active_workers().  n_par_threads can be set to 0 to
 452     // turn off parallelism.  For example when this code is called as
 453     // part of verification and SharedHeap::process_roots() is being
 454     // used, then n_par_threads() may have been set to 0.  active_workers
 455     // is not overloaded with the meaning that it is a switch to disable
 456     // parallelism and so keeps the meaning of the number of
 457     // active gc workers.  If parallelism has not been shut off by
 458     // setting n_par_threads to 0, then n_par_threads should be
 459     // equal to active_workers.  When a different mechanism for shutting
 460     // off parallelism is used, then active_workers can be used in
 461     // place of n_par_threads.
 462     int n_threads =  SharedHeap::heap()->n_par_threads();
 463     bool is_par = n_threads > 0;
 464     if (is_par) {
 465 #if INCLUDE_ALL_GCS
 466       assert(SharedHeap::heap()->n_par_threads() ==
 467              SharedHeap::heap()->workers()->active_workers(), "Mismatch");
 468       non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
 469 #else  // INCLUDE_ALL_GCS
 470       fatal("Parallel gc not supported here.");
 471 #endif // INCLUDE_ALL_GCS
 472     } else {
 473       // clear_cl finds contiguous dirty ranges of cards to process and clear.
 474 
 475       DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary());
 476       ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
 477 
 478       clear_cl.do_MemRegion(mr);
 479     }
 480   }
 481 }
 482 
 483 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
 484   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
 485   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
 486   jbyte* cur  = byte_for(mr.start());
 487   jbyte* last = byte_after(mr.last());

+++ new

 442                                                                  CardTableRS* ct) {
 443   if (!mr.is_empty()) {
 444     // Caller (process_roots()) claims that all GC threads
 445     // execute this call.  With UseDynamicNumberOfGCThreads now all
 446     // active GC threads execute this call.  The number of active GC
 447     // threads needs to be passed to par_non_clean_card_iterate_work()
 448     // to get proper partitioning and termination.
 449     //
 450     // This is an example of where n_par_threads() is used instead
 451     // of workers()->active_workers().  n_par_threads can be set to 0 to
 452     // turn off parallelism.  For example when this code is called as
 453     // part of verification and SharedHeap::process_roots() is being
 454     // used, then n_par_threads() may have been set to 0.  active_workers
 455     // is not overloaded with the meaning that it is a switch to disable
 456     // parallelism and so keeps the meaning of the number of
 457     // active gc workers.  If parallelism has not been shut off by
 458     // setting n_par_threads to 0, then n_par_threads should be
 459     // equal to active_workers.  When a different mechanism for shutting
 460     // off parallelism is used, then active_workers can be used in
 461     // place of n_par_threads.
 462     int n_threads =  GenCollectedHeap::heap()->n_par_threads();
 463     bool is_par = n_threads > 0;
 464     if (is_par) {
 465 #if INCLUDE_ALL_GCS
 466       assert(GenCollectedHeap::heap()->n_par_threads() ==
 467              GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
 468       non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
 469 #else  // INCLUDE_ALL_GCS
 470       fatal("Parallel gc not supported here.");
 471 #endif // INCLUDE_ALL_GCS
 472     } else {
 473       // clear_cl finds contiguous dirty ranges of cards to process and clear.
 474 
 475       DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary());
 476       ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
 477 
 478       clear_cl.do_MemRegion(mr);
 479     }
 480   }
 481 }
 482 
 483 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
 484   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
 485   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
 486   jbyte* cur  = byte_for(mr.start());
 487   jbyte* last = byte_after(mr.last());
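
For reference, a minimal self-contained sketch (not HotSpot's actual code) of the card-table addressing that byte_for()/byte_after() and dirty_MemRegion() rely on: each card-sized span of the heap maps to one byte in the card table, so dirtying a MemRegion amounts to setting every covering card byte to the dirty value. The 512-byte card (card_shift == 9) and the 0 / 0xff dirty/clean values match HotSpot's defaults as far as I can tell; the toy heap, flat card_table array, and the dirty_region() helper are purely illustrative.

#include <cstring>
#include <cassert>
#include <cstddef>

// Illustrative values; HotSpot's defaults are card_shift = 9 (512-byte cards),
// dirty_card = 0 and clean_card = -1 (0xff as an unsigned byte).
typedef unsigned char CardValue;
const int    card_shift = 9;
const size_t heap_bytes = 1u << 20;                 // 1 MB toy "heap"
static char      heap[heap_bytes];
static CardValue card_table[heap_bytes >> card_shift];

// Card byte covering address p (stand-in for CardTableModRefBS::byte_for()).
static CardValue* byte_for(const void* p) {
  size_t offset = (const char*)p - (const char*)heap;
  return &card_table[offset >> card_shift];
}

// Mark every card covering [start, end) dirty, as dirty_MemRegion() does;
// byte_for(end - 1) + 1 plays the role of byte_after(mr.last()).
static void dirty_region(const void* start, const void* end) {
  CardValue* cur  = byte_for(start);
  CardValue* last = byte_for((const char*)end - 1) + 1;
  memset(cur, 0 /* dirty */, last - cur);
}

int main() {
  memset(card_table, 0xff /* clean */, sizeof(card_table));
  dirty_region(heap + 100, heap + 2000);
  assert(*byte_for(heap + 100)  == 0);      // first covered card is dirty
  assert(*byte_for(heap + 1999) == 0);      // last covered card is dirty
  assert(*byte_for(heap + 2048) == 0xff);   // card past the region stays clean
  return 0;
}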


< prev index next >
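The serial branch's comment above ("clear_cl finds contiguous dirty ranges of cards to process and clear") summarizes what ClearNoncleanCardWrapper does with the card bytes. Below is a rough standalone sketch of that idea, reusing the illustrative card values from the previous example and glossing over the real wrapper's details (it walks the region from its high end and hands each run's heap range to the DirtyCardToOopClosure built by sp->new_dcto_cl() rather than printing it).

#include <cstdio>
#include <cstddef>

const unsigned char kClean = 0xff;   // illustrative card values, as above
const unsigned char kDirty = 0x00;

// Scan a card range, reset each maximal run of dirty cards to clean, and
// report the run; in the real code the heap words covered by the run would
// then be handed to the DirtyCardToOopClosure.
static void process_dirty_runs(unsigned char* cards, size_t n) {
  size_t i = 0;
  while (i < n) {
    if (cards[i] != kDirty) { i++; continue; }
    size_t start = i;
    while (i < n && cards[i] == kDirty) {
      cards[i] = kClean;             // clear the card before processing it
      i++;
    }
    printf("dirty card run: [%zu, %zu)\n", start, i);
  }
}

int main() {
  unsigned char cards[] = { kClean, kDirty, kDirty, kClean, kDirty,
                            kClean, kClean, kDirty, kDirty, kDirty };
  process_dirty_runs(cards, sizeof(cards));   // prints [1, 3), [4, 5), [7, 10)
  return 0;
}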