< prev index next >

src/share/vm/gc/cms/parNewGeneration.cpp

Print this page




 438   st->print("tot "); totals.print(st); st->cr();
 439 
 440   DEBUG_ONLY(totals.verify());
 441 }
 442 #endif // TASKQUEUE_STATS
 443 
 444 void ParScanThreadStateSet::flush() {
 445   // Work in this loop should be kept as lightweight as
 446   // possible since this might otherwise become a bottleneck
 447   // to scaling. Should we add heavy-weight work into this
 448   // loop, consider parallelizing the loop into the worker threads.
 449   for (int i = 0; i < length(); ++i) {
 450     ParScanThreadState& par_scan_state = thread_state(i);
 451 
 452     // Flush stats related to To-space PLAB activity and
 453     // retire the last buffer.
 454     par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());
 455 
 456     // Every thread has its own age table.  We need to merge
 457     // them all into one.
 458     ageTable *local_table = par_scan_state.age_table();
 459     _young_gen.age_table()->merge(local_table);
 460 
 461     // Inform old gen that we're done.
 462     _old_gen.par_promote_alloc_done(i);
 463     _old_gen.par_oop_since_save_marks_iterate_done(i);
 464   }
 465 
 466   if (UseConcMarkSweepGC) {
 467     // We need to call this even when ResizeOldPLAB is disabled
 468     // so as to avoid breaking some asserts. While we may be able
 469     // to avoid this by reorganizing the code a bit, I am loathe
 470     // to do that unless we find cases where ergo leads to bad
 471     // performance.
 472     CFLS_LAB::compute_desired_plab_size();
 473   }
 474 }
 475 
// Per-thread scanning closure over the given young generation; caches the
// end of the generation's reserved region in _boundary.
// NOTE(review): _boundary is presumably used by do_oop_work to decide
// whether an oop lies in the young gen — confirm against do_oop_work.
 476 ParScanClosure::ParScanClosure(ParNewGeneration* g,
 477                                ParScanThreadState* par_scan_state) :
 478   OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
 479   _boundary = _g->reserved().end();
 480 }
 481 
// Forwarding bodies for the four ParScan* closure variants (plain and
// narrowOop overloads): each delegates to ParScanClosure::do_oop_work
// with two compile-time-constant bool flags.
// NOTE(review): judging by the closure names, the first flag selects the
// "with barrier" variants and the second the "root scan" variants —
// confirm against do_oop_work's parameter list.
 482 void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
 483 void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
 484 
 485 void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
 486 void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
 487 
 488 void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
 489 void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
 490 
 491 void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
 492 void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }




 438   st->print("tot "); totals.print(st); st->cr();
 439 
 440   DEBUG_ONLY(totals.verify());
 441 }
 442 #endif // TASKQUEUE_STATS
 443 
 444 void ParScanThreadStateSet::flush() {
 445   // Work in this loop should be kept as lightweight as
 446   // possible since this might otherwise become a bottleneck
 447   // to scaling. Should we add heavy-weight work into this
 448   // loop, consider parallelizing the loop into the worker threads.
 449   for (int i = 0; i < length(); ++i) {
 450     ParScanThreadState& par_scan_state = thread_state(i);
 451 
 452     // Flush stats related to To-space PLAB activity and
 453     // retire the last buffer.
 454     par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());
 455 
 456     // Every thread has its own age table.  We need to merge
 457     // them all into one.
 458     AgeTable *local_table = par_scan_state.age_table();
 459     _young_gen.age_table()->merge(local_table);
 460 
 461     // Inform old gen that we're done.
 462     _old_gen.par_promote_alloc_done(i);
 463     _old_gen.par_oop_since_save_marks_iterate_done(i);
 464   }
 465 
 466   if (UseConcMarkSweepGC) {
 467     // We need to call this even when ResizeOldPLAB is disabled
 468     // so as to avoid breaking some asserts. While we may be able
 469     // to avoid this by reorganizing the code a bit, I am loathe
 470     // to do that unless we find cases where ergo leads to bad
 471     // performance.
 472     CompactibleFreeListSpaceLAB::compute_desired_plab_size();
 473   }
 474 }
 475 
// Per-thread scanning closure over the given young generation; caches the
// end of the generation's reserved region in _boundary.
// NOTE(review): _boundary is presumably used by do_oop_work to decide
// whether an oop lies in the young gen — confirm against do_oop_work.
 476 ParScanClosure::ParScanClosure(ParNewGeneration* g,
 477                                ParScanThreadState* par_scan_state) :
 478   OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
 479   _boundary = _g->reserved().end();
 480 }
 481 
// Forwarding bodies for the four ParScan* closure variants (plain and
// narrowOop overloads): each delegates to ParScanClosure::do_oop_work
// with two compile-time-constant bool flags.
// NOTE(review): judging by the closure names, the first flag selects the
// "with barrier" variants and the second the "root scan" variants —
// confirm against do_oop_work's parameter list.
 482 void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
 483 void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
 484 
 485 void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
 486 void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
 487 
 488 void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
 489 void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
 490 
 491 void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
 492 void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }


< prev index next >