
src/share/vm/gc/cms/parCardTableModRefBS.cpp


*** 37,56 ****
  #include "runtime/vmThread.hpp"

  void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                                               OopsInGenClosure* cl,
                                                               CardTableRS* ct,
!                                                              int n_threads) {
    assert(n_threads > 0, "Error: expected n_threads > 0");
!   assert((n_threads == 1 && ParallelGCThreads == 0) ||
!          n_threads <= (int)ParallelGCThreads,
!          "# worker threads != # requested!");
!   assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread");
!   assert(UseDynamicNumberOfGCThreads ||
!          !FLAG_IS_DEFAULT(ParallelGCThreads) ||
!          n_threads == (int)ParallelGCThreads,
!          "# worker threads != # requested!");
    // Make sure the LNC array is valid for the space.
    jbyte** lowest_non_clean;
    uintptr_t lowest_non_clean_base_chunk_index;
    size_t    lowest_non_clean_chunk_size;
    get_LNC_array_for_space(sp, lowest_non_clean,
--- 37,51 ----
  #include "runtime/vmThread.hpp"

  void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                                               OopsInGenClosure* cl,
                                                               CardTableRS* ct,
!                                                              uint n_threads) {
    assert(n_threads > 0, "Error: expected n_threads > 0");
!   assert(n_threads <= (uint)ParallelGCThreads,
!          err_msg("Error: n_threads: %u > ParallelGCThreads: %u", n_threads, (uint)ParallelGCThreads));
!
    // Make sure the LNC array is valid for the space.
    jbyte** lowest_non_clean;
    uintptr_t lowest_non_clean_base_chunk_index;
    size_t    lowest_non_clean_chunk_size;
    get_LNC_array_for_space(sp, lowest_non_clean,
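Reviewer note: the new assert uses HotSpot's err_msg() to produce a formatted failure message instead of the fixed "# worker threads != # requested!" strings. Below is a minimal standalone C++ sketch of the same bounded-check-with-formatted-message pattern, for readers outside HotSpot; check_n_threads and kParallelGCThreads are illustrative names, not HotSpot's.

#include <cassert>
#include <cstdio>
#include <cstdlib>

static const unsigned kParallelGCThreads = 4;  // stand-in for the real flag

// Emulates assert(n_threads <= bound, err_msg(...)): on failure, print a
// message carrying the actual values, then abort as a plain assert would.
static void check_n_threads(unsigned n_threads) {
  assert(n_threads > 0 && "expected n_threads > 0");
  if (n_threads > kParallelGCThreads) {
    std::fprintf(stderr, "Error: n_threads: %u > ParallelGCThreads: %u\n",
                 n_threads, kParallelGCThreads);
    std::abort();
  }
}

int main() {
  check_n_threads(2);      // within bounds: passes silently
  // check_n_threads(8);   // would print the formatted message and abort
  return 0;
}

Carrying the observed values in the message is what makes the single bounded check an adequate replacement for the three hand-written asserts removed above.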
*** 64,74 ****
    pst->set_n_threads(n_threads);
    pst->set_n_tasks(n_strides);

    uint stride = 0;
    while (!pst->is_task_claimed(/* reference */ stride)) {
!     process_stride(sp, mr, stride, n_strides, cl, ct,
                     lowest_non_clean,
                     lowest_non_clean_base_chunk_index,
                     lowest_non_clean_chunk_size);
    }
    if (pst->all_tasks_completed()) {
--- 59,70 ----
    pst->set_n_threads(n_threads);
    pst->set_n_tasks(n_strides);

    uint stride = 0;
    while (!pst->is_task_claimed(/* reference */ stride)) {
!     process_stride(sp, mr, stride, n_strides,
!                    cl, ct,
                     lowest_non_clean,
                     lowest_non_clean_base_chunk_index,
                     lowest_non_clean_chunk_size);
    }
    if (pst->all_tasks_completed()) {
*** 130,142 ****
                                   chunk_card_end >= end_card ?
                                     used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
!                                                    cl->gen_boundary());
!   ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);


    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
--- 126,142 ----
                                   chunk_card_end >= end_card ?
                                     used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

+   // This function is used by the parallel card table iteration.
+   const bool parallel = true;
+
    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
!                                                    cl->gen_boundary(),
!                                                    parallel);
!   ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);


    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
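Reviewer note: this hunk threads an explicit parallel flag from the call site into both the closure returned by new_dcto_cl() and the ClearNoncleanCardWrapper, instead of each object inferring the execution mode on its own. A minimal standalone C++ sketch of that flag-plumbing idea (not HotSpot code; DirtyCardClosure and CardWrapper are illustrative names):

#include <iostream>

struct DirtyCardClosure {
  bool parallel;  // a parallel closure might, e.g., clear cards atomically
  explicit DirtyCardClosure(bool p) : parallel(p) {}
};

struct CardWrapper {
  DirtyCardClosure* cl;
  bool parallel;
  CardWrapper(DirtyCardClosure* c, bool p) : cl(c), parallel(p) {}
  void process() const {
    std::cout << (parallel ? "parallel" : "serial") << " card processing\n";
  }
};

int main() {
  // This code path is only reached from the parallel card table iteration,
  // mirroring the `const bool parallel = true;` constant in the hunk above.
  const bool parallel = true;
  DirtyCardClosure cl(parallel);
  CardWrapper wrapper(&cl, parallel);
  wrapper.process();
  return 0;
}

Making the mode an explicit constructor argument keeps the caller's one decision authoritative for every object on this code path.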