src/share/vm/gc_implementation/parNew/parNewGeneration.cpp


--- old/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

 550       //   if successful, goto Start.
 551       continue;
 552 
 553       // try global overflow list.
 554     } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
 555       continue;
 556     }
 557 
 558     // Otherwise, offer termination.
 559     par_scan_state()->start_term_time();
 560     if (terminator()->offer_termination()) break;
 561     par_scan_state()->end_term_time();
 562   }
 563   assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
 564          "Broken overflow list?");
 565   // Finish the last termination pause.
 566   par_scan_state()->end_term_time();
 567 }
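
An aside on the loop that ends here: it is the standard work-stealing termination handshake. A worker that finds no local work, nothing to steal, and an empty overflow list offers termination; the offer fails if any peer still has work, and the worker goes back to scavenging. A minimal sketch of that shape, with hypothetical helper names (the drain/steal details are elided, and WorkerState is a stand-in, not a HotSpot type):

// Sketch only -- hypothetical helpers, not code from this patch.
void steal_and_terminate(ParallelTaskTerminator* term, WorkerState* st) {
  while (true) {
    if (st->drain_local_queue())  continue;  // own queue first
    if (st->steal_from_peers())   continue;  // then work stealing
    if (st->take_from_overflow()) continue;  // then the global overflow list
    st->start_term_time();
    if (term->offer_termination()) break;    // every worker idle: we are done
    st->end_term_time();                     // offer failed: work reappeared
  }
  st->end_term_time();                       // close the final termination interval
}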
 568 
 569 ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
 570                              HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
 571     AbstractGangTask("ParNewGeneration collection"),
 572     _gen(gen), _old_gen(old_gen),
 573     _young_old_boundary(young_old_boundary),
 574     _state_set(state_set)
 575   {}
 576 
 577 // Reset the terminator for the given number of
 578 // active threads.
 579 void ParNewGenTask::set_for_termination(uint active_workers) {
 580   _state_set->reset(active_workers, _gen->promotion_failed());
 581   // Should the heap be passed in?  There's only 1 for now so
 582   // grab it instead.
 583   GenCollectedHeap* gch = GenCollectedHeap::heap();
 584   gch->set_n_termination(active_workers);
 585 }
 586 
 587 void ParNewGenTask::work(uint worker_id) {
 588   GenCollectedHeap* gch = GenCollectedHeap::heap();
 589   // Since this is being done in a separate thread, need new resource
 590   // and handle marks.
 591   ResourceMark rm;
 592   HandleMark hm;
 593 
 594   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 595   assert(_state_set->is_valid(worker_id), "Should not have been called");
 596 
 597   par_scan_state.set_young_old_boundary(_young_old_boundary);
 598 
 599   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
 600                                       gch->rem_set()->klass_rem_set());
 601   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 602                                            &par_scan_state.to_space_root_closure(),
 603                                            false);
 604 
 605   par_scan_state.start_strong_roots();
 606   gch->gen_process_roots(_gen->level(),
 607                          true,  // Process younger gens, if any,
 608                                 // as strong roots.
 609                          false, // no scope; this is parallel code
 610                          GenCollectedHeap::SO_ScavengeCodeCache,
 611                          GenCollectedHeap::StrongAndWeakRoots,
 612                          &par_scan_state.to_space_root_closure(),
 613                          &par_scan_state.older_gen_closure(),
 614                          &cld_scan_closure);
 615 
 616   par_scan_state.end_strong_roots();
 617 
 618   // "evacuate followers".
 619   par_scan_state.evacuate_followers_closure().do_void();
 620 }
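
For orientation, each gang worker executes work() against its own ParScanThreadState, so both root scanning and the evacuate-followers phase run fully in parallel. Condensed to its phases (identifiers from the listing, structure simplified; state_for() and scan_strong_roots() are hypothetical stand-ins for _state_set->thread_state() and gch->gen_process_roots()):

// Per-worker scavenge, reduced to its three phases.
void worker_scavenge(uint worker_id) {
  ParScanThreadState& st = state_for(worker_id);  // hypothetical accessor
  st.start_strong_roots();
  scan_strong_roots(&st);                         // root scanning phase
  st.end_strong_roots();
  st.evacuate_followers_closure().do_void();      // drain everything we copied
}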
 621 
 622 #ifdef _MSC_VER
 623 #pragma warning( push )
 624 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 625 #endif
 626 ParNewGeneration::
 627 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
 628   : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
 629   _overflow_list(NULL),


 935   // Capture heap used before collection (for printing).
 936   size_t gch_prev_used = gch->used();
 937 
 938   age_table()->clear();
 939   to()->clear(SpaceDecorator::Mangle);
 940 
 941   gch->save_marks();
 942   assert(workers != NULL, "Need parallel worker threads.");
 943   uint n_workers = active_workers;
 944 
 945   // Set the correct parallelism (number of queues) in the reference processor
 946   ref_processor()->set_active_mt_degree(n_workers);
 947 
 948   // Always set the terminator for the active number of workers
 949   // because only those workers go through the termination protocol.
 950   ParallelTaskTerminator _term(n_workers, task_queues());
 951   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 952                                          *to(), *this, *_old_gen, *task_queues(),
 953                                          _overflow_stacks, desired_plab_sz(), _term);
 954 
 955   ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
 956   gch->set_par_threads(n_workers);
 957   gch->rem_set()->prepare_for_younger_refs_iterate(true);
 958   // It turns out that even when we're using 1 thread, doing the work in a
 959   // separate thread causes wide variance in run times.  We can't help this
 960   // in the multi-threaded case, but we special-case n=1 here to get
 961   // repeatable measurements of the 1-thread overhead of the parallel code.
 962   if (n_workers > 1) {
 963     StrongRootsScope srs;
 964     workers->run_task(&tsk);
 965   } else {
 966     StrongRootsScope srs;
 967     tsk.work(0);
 968   }
 969   thread_state_set.reset(0 /* Bad value in debug if not reset */,
 970                          promotion_failed());
 971 
 972   // Trace and reset failed promotion info.
 973   if (promotion_failed()) {
 974     thread_state_set.trace_promotion_failed(gc_tracer());
 975   }
 976 
 977   // Process (weak) reference objects found during scavenge.
 978   ReferenceProcessor* rp = ref_processor();
 979   IsAliveClosure is_alive(this);
 980   ScanWeakRefClosure scan_weak_ref(this);
 981   KeepAliveClosure keep_alive(&scan_weak_ref);
 982   ScanClosure               scan_without_gc_barrier(this, false);
 983   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
 984   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
 985   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
 986     &scan_without_gc_barrier, &scan_with_gc_barrier);
 987   rp->setup_policy(clear_all_soft_refs);
 988   // Can the mt_degree be set later (at run_task() time would be best)?
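
End of the old version. The detail that matters for the diff below: the StrongRootsScope is default-constructed inside each branch of the n_workers test and never learns how many workers participate, and gen_process_roots() is told via a bare bool that no scope is being activated. Condensed from the listing above:

// Old shape: anonymous scope, no worker count, created per branch.
ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
if (n_workers > 1) {
  StrongRootsScope srs;
  workers->run_task(&tsk);
} else {
  StrongRootsScope srs;
  tsk.work(0);
}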

+++ new/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

 550       //   if successful, goto Start.
 551       continue;
 552 
 553       // try global overflow list.
 554     } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
 555       continue;
 556     }
 557 
 558     // Otherwise, offer termination.
 559     par_scan_state()->start_term_time();
 560     if (terminator()->offer_termination()) break;
 561     par_scan_state()->end_term_time();
 562   }
 563   assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
 564          "Broken overflow list?");
 565   // Finish the last termination pause.
 566   par_scan_state()->end_term_time();
 567 }
 568 
 569 ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
 570                              HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
 571                              StrongRootsScope* strong_roots_scope) :
 572     AbstractGangTask("ParNewGeneration collection"),
 573     _gen(gen), _old_gen(old_gen),
 574     _young_old_boundary(young_old_boundary),
 575     _state_set(state_set),
 576     _strong_roots_scope(strong_roots_scope)
 577   {}
 578 
 579 // Reset the terminator for the given number of
 580 // active threads.
 581 void ParNewGenTask::set_for_termination(uint active_workers) {
 582   _state_set->reset(active_workers, _gen->promotion_failed());
 583   // Should the heap be passed in?  There's only 1 for now so
 584   // grab it instead.
 585   GenCollectedHeap* gch = GenCollectedHeap::heap();
 586   gch->set_n_termination(active_workers);
 587 }
 588 
 589 void ParNewGenTask::work(uint worker_id) {
 590   GenCollectedHeap* gch = GenCollectedHeap::heap();
 591   // Since this is being done in a separate thread, need new resource
 592   // and handle marks.
 593   ResourceMark rm;
 594   HandleMark hm;
 595 
 596   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 597   assert(_state_set->is_valid(worker_id), "Should not have been called");
 598 
 599   par_scan_state.set_young_old_boundary(_young_old_boundary);
 600 
 601   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
 602                                       gch->rem_set()->klass_rem_set());
 603   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 604                                            &par_scan_state.to_space_root_closure(),
 605                                            false);
 606 
 607   par_scan_state.start_strong_roots();
 608   gch->gen_process_roots(_strong_roots_scope,
 609                          _gen->level(),
 610                          true,  // Process younger gens, if any,
 611                                 // as strong roots.
 612                          GenCollectedHeap::SO_ScavengeCodeCache,
 613                          GenCollectedHeap::StrongAndWeakRoots,
 614                          &par_scan_state.to_space_root_closure(),
 615                          &par_scan_state.older_gen_closure(),
 616                          &cld_scan_closure);
 617 
 618   par_scan_state.end_strong_roots();
 619 
 620   // "evacuate followers".
 621   par_scan_state.evacuate_followers_closure().do_void();
 622 }
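
The visible change in work(): the "no scope; this is parallel code" bool is gone, and the scope now travels explicitly as the first argument. Before and after of the call head, condensed from the two listings:

// old: gch->gen_process_roots(_gen->level(), true, false /* no scope */, ...);
// new: gch->gen_process_roots(_strong_roots_scope, _gen->level(), true, ...);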
 623 
 624 #ifdef _MSC_VER
 625 #pragma warning( push )
 626 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 627 #endif
 628 ParNewGeneration::
 629 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
 630   : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
 631   _overflow_list(NULL),


 937   // Capture heap used before collection (for printing).
 938   size_t gch_prev_used = gch->used();
 939 
 940   age_table()->clear();
 941   to()->clear(SpaceDecorator::Mangle);
 942 
 943   gch->save_marks();
 944   assert(workers != NULL, "Need parallel worker threads.");
 945   uint n_workers = active_workers;
 946 
 947   // Set the correct parallelism (number of queues) in the reference processor
 948   ref_processor()->set_active_mt_degree(n_workers);
 949 
 950   // Always set the terminator for the active number of workers
 951   // because only those workers go through the termination protocol.
 952   ParallelTaskTerminator _term(n_workers, task_queues());
 953   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 954                                          *to(), *this, *_old_gen, *task_queues(),
 955                                          _overflow_stacks, desired_plab_sz(), _term);
 956 
 957   {
 958     StrongRootsScope srs(n_workers);
 959 
 960     ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
 961     gch->set_par_threads(n_workers);
 962     gch->rem_set()->prepare_for_younger_refs_iterate(true);
 963     // It turns out that even when we're using 1 thread, doing the work in a
 964     // separate thread causes wide variance in run times.  We can't help this
 965     // in the multi-threaded case, but we special-case n=1 here to get
 966     // repeatable measurements of the 1-thread overhead of the parallel code.
 967     if (n_workers > 1) {
 968       workers->run_task(&tsk);
 969     } else {
 970       tsk.work(0);
 971     }
 972   }
 973 
 974   thread_state_set.reset(0 /* Bad value in debug if not reset */,
 975                          promotion_failed());
 976 
 977   // Trace and reset failed promotion info.
 978   if (promotion_failed()) {
 979     thread_state_set.trace_promotion_failed(gc_tracer());
 980   }
 981 
 982   // Process (weak) reference objects found during scavenge.
 983   ReferenceProcessor* rp = ref_processor();
 984   IsAliveClosure is_alive(this);
 985   ScanWeakRefClosure scan_weak_ref(this);
 986   KeepAliveClosure keep_alive(&scan_weak_ref);
 987   ScanClosure               scan_without_gc_barrier(this, false);
 988   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
 989   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
 990   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
 991     &scan_without_gc_barrier, &scan_with_gc_barrier);
 992   rp->setup_policy(clear_all_soft_refs);
 993   // Can the mt_degree be set later (at run_task() time would be best)?
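
End of the new version. The corresponding new shape, condensed from the listing above: a single StrongRootsScope is constructed with the worker count and brackets both the task's construction and its execution, so the scope outlives every worker that dereferences the pointer handed to ParNewGenTask:

{
  StrongRootsScope srs(n_workers);  // scope now knows how many workers participate
  ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
  if (n_workers > 1) {
    workers->run_task(&tsk);
  } else {
    tsk.work(0);
  }
}  // srs is destroyed only after all workers have finished with it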

