
src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

rev 11459 : create_thread_failed: fix errors from thread creation failures
rev 11460 : refactor: switch to update_active_workers()
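
Both revisions target the same failure mode: a work gang can end up with fewer live threads than the caller requested, for example when native thread creation fails, so callers must not assume the requested worker count is the count they actually get. A minimal sketch of the update_active_workers() contract the second revision relies on, assuming a simplified gang (illustrative only, not the HotSpot implementation):

    // Sketch: clamp the request to capacity, try to start threads, and
    // report back how many workers will actually participate.
    // WorkGangSketch, _created_workers, and this body are assumptions.
    uint WorkGangSketch::update_active_workers(uint requested) {
      _active_workers = MIN2(requested, _total_workers);
      add_workers(false /* initializing */);            // creation may fail
      _active_workers = MIN2(_active_workers, _created_workers);
      return _active_workers;                           // callers must use this
    }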


2871   // Whenever a CLD is found, it will be claimed before proceeding to mark
2872   // the klasses. The claimed marks need to be cleared before marking starts.
2873   ClassLoaderDataGraph::clear_claimed_marks();
2874 
2875   print_eden_and_survivor_chunk_arrays();
2876 
2877   {
2878 #if defined(COMPILER2) || INCLUDE_JVMCI
2879     DerivedPointerTableDeactivate dpt_deact;
2880 #endif
2881     if (CMSParallelInitialMarkEnabled) {
2882       // The parallel version.
2883       WorkGang* workers = gch->workers();
2884       assert(workers != NULL, "Need parallel worker threads.");
2885       uint n_workers = workers->active_workers();
2886 
2887       StrongRootsScope srs(n_workers);
2888 
2889       CMSParInitialMarkTask tsk(this, &srs, n_workers);
2890       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2891       if (n_workers > 1) {
2892         workers->run_task(&tsk);
2893       } else {
2894         tsk.work(0);
2895       }
2896     } else {
2897       // The serial version.
2898       CLDToOopClosure cld_closure(&notOlder, true);
2899       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2900 
2901       StrongRootsScope srs(1);
2902 
2903       gch->gen_process_roots(&srs,
2904                              GenCollectedHeap::OldGen,
2905                              true,   // young gen as roots
2906                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
2907                              should_unload_classes(),
2908                              &notOlder,
2909                              NULL,
2910                              &cld_closure);
2911     }


3490   // are almost identical into one for better maintainability and
3491   // readability. See 6445193.
3492   //
3493   // Tony 2006.06.29
3494   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3495                    ConcurrentMarkSweepThread::should_yield() &&
3496                    !CMSCollector::foregroundGCIsActive(); ++i) {
3497     os::sleep(Thread::current(), 1, false);
3498   }
3499 
3500   ConcurrentMarkSweepThread::synchronize(true);
3501   _bit_map_lock->lock_without_safepoint_check();
3502   _collector->startTimer();
3503 }
3504 
3505 bool CMSCollector::do_marking_mt() {
3506   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3507   uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3508                                                                   conc_workers()->active_workers(),
3509                                                                   Threads::number_of_non_daemon_threads());
3510   conc_workers()->set_active_workers(num_workers);
3511 
3512   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3513 
3514   CMSConcMarkingTask tsk(this,
3515                          cms_space,
3516                          conc_workers(),
3517                          task_queues());
3518 
3519   // Since the actual number of workers we get may be different
3520   // from the number we requested above, do we need to do anything different
3521   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3522   // class?? XXX
3523   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3524 
3525   // Refs discovery is already non-atomic.
3526   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3527   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3528   conc_workers()->start_task(&tsk);
3529   while (tsk.yielded()) {
3530     tsk.coordinator_yield();




2871   // Whenever a CLD is found, it will be claimed before proceeding to mark
2872   // the klasses. The claimed marks need to be cleared before marking starts.
2873   ClassLoaderDataGraph::clear_claimed_marks();
2874 
2875   print_eden_and_survivor_chunk_arrays();
2876 
2877   {
2878 #if defined(COMPILER2) || INCLUDE_JVMCI
2879     DerivedPointerTableDeactivate dpt_deact;
2880 #endif
2881     if (CMSParallelInitialMarkEnabled) {
2882       // The parallel version.
2883       WorkGang* workers = gch->workers();
2884       assert(workers != NULL, "Need parallel worker threads.");
2885       uint n_workers = workers->active_workers();
2886 
2887       StrongRootsScope srs(n_workers);
2888 
2889       CMSParInitialMarkTask tsk(this, &srs, n_workers);
2890       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2891       // If the total number of workers is greater than 1, then multiple
2892       // workers may be used at some point, and the initialization above has
2893       // been set up such that the single-threaded path cannot be used.
2894       if (workers->total_workers() > 1) {
2895         workers->run_task(&tsk);
2896       } else {
2897         tsk.work(0);
2898       }
2899     } else {
2900       // The serial version.
2901       CLDToOopClosure cld_closure(&notOlder, true);
2902       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2903 
2904       StrongRootsScope srs(1);
2905 
2906       gch->gen_process_roots(&srs,
2907                              GenCollectedHeap::OldGen,
2908                              true,   // young gen as roots
2909                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
2910                              should_unload_classes(),
2911                              &notOlder,
2912                              NULL,
2913                              &cld_closure);
2914     }
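
The new guard in the initial mark dispatches on the gang's total size rather than the active count. initialize_sequential_subtasks_for_young_gen_rescan(n_workers) sets up claiming state on the assumption that gang workers will execute the task, so once the gang can ever field more than one thread, the inline tsk.work(0) shortcut is no longer safe even if only one worker happens to be active. A hedged sketch of the resulting pattern (simplified, using the names from the code above):

    // Dispatch on capacity, not the currently-active count: the subtask
    // bookkeeping was initialized for the parallel path whenever the gang
    // can ever run more than one thread.
    if (workers->total_workers() > 1) {
      workers->run_task(&tsk);   // gang workers claim and execute subtasks
    } else {
      tsk.work(0);               // single-thread gang: run worker 0 inline
    }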


3493   // are almost identical into one for better maintainability and
3494   // readability. See 6445193.
3495   //
3496   // Tony 2006.06.29
3497   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3498                    ConcurrentMarkSweepThread::should_yield() &&
3499                    !CMSCollector::foregroundGCIsActive(); ++i) {
3500     os::sleep(Thread::current(), 1, false);
3501   }
3502 
3503   ConcurrentMarkSweepThread::synchronize(true);
3504   _bit_map_lock->lock_without_safepoint_check();
3505   _collector->startTimer();
3506 }
3507 
3508 bool CMSCollector::do_marking_mt() {
3509   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3510   uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3511                                                                   conc_workers()->active_workers(),
3512                                                                   Threads::number_of_non_daemon_threads());
3513   num_workers = conc_workers()->update_active_workers(num_workers);
3514 
3515   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3516 
3517   CMSConcMarkingTask tsk(this,
3518                          cms_space,
3519                          conc_workers(),
3520                          task_queues());
3521 
3522   // Since the actual number of workers we get may be different
3523   // from the number we requested above, do we need to do anything different
3524   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3525   // class?? XXX
3526   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3527 
3528   // Refs discovery is already non-atomic.
3529   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3530   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3531   conc_workers()->start_task(&tsk);
3532   while (tsk.yielded()) {
3533     tsk.coordinator_yield();
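
In do_marking_mt(), the refactor replaces the fire-and-forget set_active_workers() with update_active_workers() and feeds the returned value back into num_workers before the marking subtasks are partitioned. A sketch of the resulting calling pattern, condensed from the code above (not verbatim):

    // Ask for a worker count, but size the work partition by what the
    // gang actually grants; the two can differ if threads failed to start.
    uint requested = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
                                                                  conc_workers()->active_workers(),
                                                                  Threads::number_of_non_daemon_threads());
    uint granted = conc_workers()->update_active_workers(requested);
    cms_space->initialize_sequential_subtasks_for_marking(granted);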

