
src/share/vm/gc/shared/cardTableModRefBS.cpp

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}

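For context: the call above is HotSpot's card-marking post-barrier. A minimal sketch of what inline_write_ref_field (defined in cardTableModRefBS.inline.hpp, not in this file) boils down to -- illustrative, not the verbatim inline definition:

// Sketch: dirty the one card byte covering the updated field. The
// release variant orders the card mark after the preceding field store.
template <class T>
inline void CardTableModRefBS::inline_write_ref_field(T* field, oop newVal, bool release) {
  volatile jbyte* byte = byte_for((void*)field);
  if (release) {
    OrderAccess::release_store((volatile jbyte*)byte, dirty_card);
  } else {
    *byte = dirty_card;
  }
}
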
void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct,
                                                                 uint n_threads) {
  if (!mr.is_empty()) {
    if (n_threads > 0) {
#if INCLUDE_ALL_GCS
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // clear_cl finds contiguous dirty ranges of cards to process and clear.

      // This is the single-threaded version used by DefNew.
      const bool parallel = false;

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

      clear_cl.do_MemRegion(mr);
    }
  }
}

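To make the single-threaded path concrete: do_MemRegion walks the card bytes covering mr, batches maximal runs of non-clean cards, clears them, and hands each run's memory to the DirtyCardToOopClosure. A standalone sketch of that scan pattern -- scan_dirty_runs and process_run_fn are hypothetical names, and card indices stand in for the real MemRegions:

#include <stddef.h>
#include <stdint.h>

// Hypothetical stand-in for handing a run of dirty cards to the
// DirtyCardToOopClosure.
typedef void (*process_run_fn)(size_t first_card, size_t last_card);

// Find maximal runs of dirty card bytes in cards[0, n_cards), clear
// each run, then process it -- the "process and clear" pattern the
// comment in the code above describes.
static void scan_dirty_runs(int8_t* cards, size_t n_cards,
                            int8_t dirty, int8_t clean,
                            process_run_fn process_run) {
  size_t i = 0;
  while (i < n_cards) {
    if (cards[i] != dirty) { i++; continue; }
    size_t start = i;
    while (i < n_cards && cards[i] == dirty) {
      cards[i] = clean;   // clear the card as part of the sweep
      i++;
    }
    process_run(start, i - 1);
  }
}
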
// Dirty every card byte covering the (word-aligned) region mr.
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}
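
The byte_for/byte_after pair encodes the card-table address math: one byte per 2^9 = 512-byte card, so a region of N words dirties roughly N * HeapWordSize / 512 card bytes. A self-contained sketch of that mapping, assuming HotSpot's usual card_shift of 9 and simplifying the real biased byte_map_base to an explicit covered-range base (ToyCardTable is a hypothetical name):

#include <stddef.h>
#include <stdint.h>

struct ToyCardTable {
  static const int    card_shift = 9;                  // 512-byte cards, as in HotSpot
  static const size_t card_size  = (size_t)1 << card_shift;

  int8_t*   _byte_map;      // one byte per card
  uintptr_t _covered_base;  // start of the covered heap range (simplification)

  // Card byte covering address p.
  int8_t* byte_for(const void* p) const {
    return &_byte_map[((uintptr_t)p - _covered_base) >> card_shift];
  }
  // One past the card byte for p; byte_after(mr.last()) therefore bounds
  // the loop in dirty_MemRegion without overshooting when mr.end() falls
  // exactly on a card boundary.
  int8_t* byte_after(const void* p) const {
    return byte_for(p) + 1;
  }
};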

