
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

Old:

 957   int n_workers = active_workers;
 958 
 959   // Set the correct parallelism (number of queues) in the reference processor
 960   ref_processor()->set_active_mt_degree(n_workers);
 961 
 962   // Always set the terminator for the active number of workers
 963   // because only those workers go through the termination protocol.
 964   ParallelTaskTerminator _term(n_workers, task_queues());
 965   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 966                                          *to(), *this, *_old_gen, *task_queues(),
 967                                          _overflow_stacks, desired_plab_sz(), _term);
 968 
 969   ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
 970   gch->set_par_threads(n_workers);
 971   gch->rem_set()->prepare_for_younger_refs_iterate(true);
 972   // It turns out that even when we're using 1 thread, doing the work in a
 973   // separate thread causes wide variance in run times.  We can't help this
 974   // in the multi-threaded case, but we special-case n=1 here to get
 975   // repeatable measurements of the 1-thread overhead of the parallel code.
 976   if (n_workers > 1) {
 977     GenCollectedHeap::StrongRootsScope srs(gch);
 978     workers->run_task(&tsk);
 979   } else {
 980     GenCollectedHeap::StrongRootsScope srs(gch);
 981     tsk.work(0);
 982   }
 983   thread_state_set.reset(0 /* Bad value in debug if not reset */,
 984                          promotion_failed());
 985 
 986   // Trace and reset failed promotion info.
 987   if (promotion_failed()) {
 988     thread_state_set.trace_promotion_failed(gc_tracer());
 989   }
 990 
 991   // Process (weak) reference objects found during scavenge.
 992   ReferenceProcessor* rp = ref_processor();
 993   IsAliveClosure is_alive(this);
 994   ScanWeakRefClosure scan_weak_ref(this);
 995   KeepAliveClosure keep_alive(&scan_weak_ref);
 996   ScanClosure               scan_without_gc_barrier(this, false);
 997   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
 998   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
 999   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
1000     &scan_without_gc_barrier, &scan_with_gc_barrier);

New:

 957   int n_workers = active_workers;
 958 
 959   // Set the correct parallelism (number of queues) in the reference processor
 960   ref_processor()->set_active_mt_degree(n_workers);
 961 
 962   // Always set the terminator for the active number of workers
 963   // because only those workers go through the termination protocol.
 964   ParallelTaskTerminator _term(n_workers, task_queues());
 965   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 966                                          *to(), *this, *_old_gen, *task_queues(),
 967                                          _overflow_stacks, desired_plab_sz(), _term);
 968 
 969   ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
 970   gch->set_par_threads(n_workers);
 971   gch->rem_set()->prepare_for_younger_refs_iterate(true);
 972   // It turns out that even when we're using 1 thread, doing the work in a
 973   // separate thread causes wide variance in run times.  We can't help this
 974   // in the multi-threaded case, but we special-case n=1 here to get
 975   // repeatable measurements of the 1-thread overhead of the parallel code.
 976   if (n_workers > 1) {
 977     GenCollectedHeap::StrongRootsScope srs;
 978     workers->run_task(&tsk);
 979   } else {
 980     GenCollectedHeap::StrongRootsScope srs;
 981     tsk.work(0);
 982   }
 983   thread_state_set.reset(0 /* Bad value in debug if not reset */,
 984                          promotion_failed());
 985 
 986   // Trace and reset failed promotion info.
 987   if (promotion_failed()) {
 988     thread_state_set.trace_promotion_failed(gc_tracer());
 989   }
 990 
 991   // Process (weak) reference objects found during scavenge.
 992   ReferenceProcessor* rp = ref_processor();
 993   IsAliveClosure is_alive(this);
 994   ScanWeakRefClosure scan_weak_ref(this);
 995   KeepAliveClosure keep_alive(&scan_weak_ref);
 996   ScanClosure               scan_without_gc_barrier(this, false);
 997   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
 998   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
 999   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
1000     &scan_without_gc_barrier, &scan_with_gc_barrier);
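
The only change in this hunk is that GenCollectedHeap::StrongRootsScope is now default-constructed (srs) instead of being handed the heap pointer (srs(gch)); the scoping around workers->run_task(&tsk) and tsk.work(0) is unchanged. In both versions the object is an RAII guard: whatever per-collection setup its constructor performs is undone by its destructor when srs goes out of scope at the closing brace of either branch. Below is a minimal, self-contained sketch of that idiom; StrongRootsGuard, begin_strong_roots() and end_strong_roots() are illustrative stand-ins, not the HotSpot API.

#include <cstdio>

// Illustrative stand-ins for the real setup/teardown work; not HotSpot code.
static void begin_strong_roots() { std::printf("strong-roots setup\n"); }
static void end_strong_roots()   { std::printf("strong-roots teardown\n"); }

// Minimal RAII guard in the spirit of GenCollectedHeap::StrongRootsScope:
// the constructor performs the setup and the destructor runs the matching
// teardown automatically when the guard leaves scope.
class StrongRootsGuard {
 public:
  StrongRootsGuard()  { begin_strong_roots(); }
  ~StrongRootsGuard() { end_strong_roots(); }
};

// Mirrors the if/else shape above: each branch opens its own scope, so the
// teardown runs right after the parallel or single-threaded task finishes.
static void run_scavenge_task(int n_workers) {
  if (n_workers > 1) {
    StrongRootsGuard srs;
    std::printf("run task on %d workers\n", n_workers);
  } else {
    StrongRootsGuard srs;
    std::printf("run task on the current thread\n");
  }
}

int main() {
  run_scavenge_task(1);
  run_scavenge_task(4);
  return 0;
}

The sketch only illustrates the scoping: dropping the gch argument changes how the guard is constructed, not when its constructor and destructor run.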

