--- cardTableModRefBS.cpp (before)
+++ cardTableModRefBS.cpp (after)
@@ -423,74 +423,57 @@
               p2i(addr_for((jbyte*) _committed[ind].start())),
               p2i(addr_for((jbyte*) _committed[ind].last())));
   }
   // Touch the last card of the covered region to show that it
   // is committed (or SEGV).
   debug_only((void) (*byte_for(_covered[ind].last()));)
   debug_only(verify_guard();)
 }

 // Note that these versions are precise! The scanning code has to handle the
 // fact that the write barrier may be either precise or imprecise.

 void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
   inline_write_ref_field(field, newVal, release);
 }


 void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                  MemRegion mr,
                                                                  OopsInGenClosure* cl,
-                                                                 CardTableRS* ct) {
+                                                                 CardTableRS* ct,
+                                                                 uint n_threads) {
   if (!mr.is_empty()) {
-    // Caller (process_roots()) claims that all GC threads
-    // execute this call. With UseDynamicNumberOfGCThreads now all
-    // active GC threads execute this call. The number of active GC
-    // threads needs to be passed to par_non_clean_card_iterate_work()
-    // to get proper partitioning and termination.
-    //
-    // This is an example of where n_par_threads() is used instead
-    // of workers()->active_workers(). n_par_threads can be set to 0 to
-    // turn off parallelism. For example when this code is called as
-    // part of verification during root processing then n_par_threads()
-    // may have been set to 0. active_workers is not overloaded with
-    // the meaning that it is a switch to disable parallelism and so keeps
-    // the meaning of the number of active gc workers. If parallelism has
-    // not been shut off by setting n_par_threads to 0, then n_par_threads
-    // should be equal to active_workers. When a different mechanism for
-    // shutting off parallelism is used, then active_workers can be used in
-    // place of n_par_threads.
-    int n_threads = GenCollectedHeap::heap()->n_par_threads();
-    bool is_par = n_threads > 0;
-    if (is_par) {
+    if (n_threads > 0) {
 #if INCLUDE_ALL_GCS
-      assert(GenCollectedHeap::heap()->n_par_threads() ==
-             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
       non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
 #else // INCLUDE_ALL_GCS
       fatal("Parallel gc not supported here.");
 #endif // INCLUDE_ALL_GCS
     } else {
       // clear_cl finds contiguous dirty ranges of cards to process and clear.

-      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary());
-      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
+      // This is the single-threaded version used by DefNew.
+      const bool parallel = false;
+
+      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
+      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

       clear_cl.do_MemRegion(mr);
     }
   }
 }

 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   jbyte* cur  = byte_for(mr.start());
   jbyte* last = byte_after(mr.last());
   while (cur < last) {
     *cur = dirty_card;
     cur++;
   }
 }

 void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
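The long comment deleted on the left explains the contract the diff retires: the card table used to ask the heap for n_par_threads(), where 0 doubled as a switch to disable parallelism (e.g. during verification in root processing). After the change the caller passes n_threads in directly, and 0 still selects the serial path. A minimal standalone sketch of the new contract, not the real HotSpot code; WorkGang is a stand-in, and iterate_parallel/iterate_serial are hypothetical placeholders for the two branches in the diff above:

#include <cstdio>

struct WorkGang { unsigned active_workers() const { return 4; } };

// Stand-ins for the parallel and serial card-scanning paths.
void iterate_parallel(unsigned n_threads) { std::printf("parallel, %u threads\n", n_threads); }
void iterate_serial()                     { std::printf("serial\n"); }

// Mirrors the new control flow: the caller decides, and
// n_threads == 0 means "run single-threaded".
void non_clean_card_iterate_possibly_parallel(unsigned n_threads) {
  if (n_threads > 0) {
    iterate_parallel(n_threads);
  } else {
    iterate_serial();
  }
}

int main() {
  WorkGang workers;
  non_clean_card_iterate_possibly_parallel(workers.active_workers()); // normal GC scan
  non_clean_card_iterate_possibly_parallel(0);                        // e.g. verification
}

The payoff is that the old assert comparing n_par_threads() against active_workers() becomes unnecessary: there is only one source of truth for the thread count, owned by the caller.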
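dirty_MemRegion (unchanged by the diff) shows the card-marking arithmetic in miniature: byte_for() maps a heap address to its card byte, byte_after(mr.last()) points one past the card byte of the region's last word, and every byte in between is set to dirty_card. Below is a self-contained model of that arithmetic. It assumes HotSpot's usual 512-byte cards (card_shift == 9) and its card values (clean_card == -1, dirty_card == 0); CardTable, heap_start, and dirty_region are illustrative names for this sketch, not the real API:

#include <cstdint>
#include <cstdio>
#include <vector>

const int    card_shift = 9;    // assumed: 512-byte cards, HotSpot's default
const int8_t clean_card = -1;   // card values as in CardTableModRefBS
const int8_t dirty_card =  0;

struct CardTable {
  uintptr_t           heap_start;  // lowest covered address
  std::vector<int8_t> byte_map;    // one byte per 512-byte card

  CardTable(uintptr_t start, size_t heap_bytes)
    : heap_start(start), byte_map(heap_bytes >> card_shift, clean_card) {}

  // Card byte covering address p (cf. byte_for in the source).
  int8_t* byte_for(uintptr_t p)   { return &byte_map[(p - heap_start) >> card_shift]; }
  // One past the card byte covering p (cf. byte_after).
  int8_t* byte_after(uintptr_t p) { return byte_for(p) + 1; }

  // Same shape as dirty_MemRegion: dirty every card the region touches,
  // including the card holding the last word.
  void dirty_region(uintptr_t start, uintptr_t last) {
    int8_t* cur = byte_for(start);
    int8_t* end = byte_after(last);
    while (cur < end) {
      *cur = dirty_card;
      cur++;
    }
  }
};

int main() {
  CardTable ct(0x100000, 64 * 1024);       // 64 KiB heap -> 128 cards
  ct.dirty_region(0x100800, 0x100FFF);     // touches cards 4 through 7
  std::printf("card 4 dirty? %s\n",
              *ct.byte_for(0x100800) == dirty_card ? "yes" : "no");
}

Using byte_after(mr.last()) rather than byte_for(mr.end()) is what keeps the loop correct when the region ends exactly on a card boundary: the last word's card is always included, and nothing past it.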