
src/hotspot/share/gc/cms/parNewGeneration.cpp


Old version:

 476     // them all into one.
 477     AgeTable *local_table = par_scan_state.age_table();
 478     _young_gen.age_table()->merge(local_table);
 479 
 480     // Inform old gen that we're done.
 481     _old_gen.par_promote_alloc_done(i);
 482   }
 483 
 484   if (UseConcMarkSweepGC) {
 485     // We need to call this even when ResizeOldPLAB is disabled
 486     // so as to avoid breaking some asserts. While we may be able
 487     // to avoid this by reorganizing the code a bit, I am loath
 488     // to do that unless we find cases where ergo leads to bad
 489     // performance.
 490     CompactibleFreeListSpaceLAB::compute_desired_plab_size();
 491   }
 492 }
 493 
 494 ParScanClosure::ParScanClosure(ParNewGeneration* g,
 495                                ParScanThreadState* par_scan_state) :
 496   OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
 497   _boundary = _g->reserved().end();
 498 }
 499 
 500 void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
 501 void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
 502 
 503 void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
 504 void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
 505 
 506 void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
 507 void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
 508 
 509 void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
 510 void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
 511 
 512 ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
 513                                              ParScanThreadState* par_scan_state)
 514   : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
 515 {}
 516 


 584                              StrongRootsScope* strong_roots_scope) :
 585     AbstractGangTask("ParNewGeneration collection"),
 586     _young_gen(young_gen), _old_gen(old_gen),
 587     _young_old_boundary(young_old_boundary),
 588     _state_set(state_set),
 589     _strong_roots_scope(strong_roots_scope)
 590 {}
 591 
 592 void ParNewGenTask::work(uint worker_id) {
 593   GenCollectedHeap* gch = GenCollectedHeap::heap();
 594   // Since this is being done in a separate thread, need new resource
 595   // and handle marks.
 596   ResourceMark rm;
 597   HandleMark hm;
 598 
 599   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 600   assert(_state_set->is_valid(worker_id), "Should not have been called");
 601 
 602   par_scan_state.set_young_old_boundary(_young_old_boundary);
 603 
 604   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
 605                                       gch->rem_set()->klass_rem_set());
 606   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 607                                            &par_scan_state.to_space_root_closure(),
 608                                            false);
 609 
 610   par_scan_state.start_strong_roots();
 611   gch->young_process_roots(_strong_roots_scope,
 612                            &par_scan_state.to_space_root_closure(),
 613                            &par_scan_state.older_gen_closure(),
 614                            &cld_scan_closure);
 615 
 616   par_scan_state.end_strong_roots();
 617 
 618   // "evacuate followers".
 619   par_scan_state.evacuate_followers_closure().do_void();
 620 
 621     // This will collapse this worker's promoted object list that's
 622     // created during the main parallel phase of ParNew. This has
 623     // to be called after all workers have finished promoting objects
 624     // and scanning promoted objects. It should be safe to call it from
 625     // here, given that we can only reach here after all threads have
 626     // offered termination, i.e., after there is no more work to be
 627     // done. It will also disable promotion tracking for the rest of
 628     // this GC, as it does not need to be on during reference processing.


New version:

 476     // them all into one.
 477     AgeTable *local_table = par_scan_state.age_table();
 478     _young_gen.age_table()->merge(local_table);
 479 
 480     // Inform old gen that we're done.
 481     _old_gen.par_promote_alloc_done(i);
 482   }
 483 
 484   if (UseConcMarkSweepGC) {
 485     // We need to call this even when ResizeOldPLAB is disabled
 486     // so as to avoid breaking some asserts. While we may be able
 487     // to avoid this by reorganizing the code a bit, I am loath
 488     // to do that unless we find cases where ergo leads to bad
 489     // performance.
 490     CompactibleFreeListSpaceLAB::compute_desired_plab_size();
 491   }
 492 }
 493 
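The per-thread age-table handling above (lines 476-478) is a plain reduction: each GC worker fills a private table of surviving words per tenuring age, and the young generation's global table sums the workers' tables once the parallel phase is done. A minimal self-contained sketch of that pattern follows; the type and method names are invented for illustration and are not HotSpot's actual AgeTable API.

#include <cstddef>

// Simplified stand-in for a per-worker age table: one bucket of surviving
// words for each tenuring age. Names are illustrative only.
struct WorkerAgeTable {
  static const int table_size = 16;
  std::size_t sizes[table_size];

  WorkerAgeTable() { clear(); }

  void clear() {
    for (int i = 0; i < table_size; i++) { sizes[i] = 0; }
  }

  // One worker records `words` worth of objects surviving at `age`.
  void add(int age, std::size_t words) { sizes[age] += words; }

  // Fold another worker's private counts into this table; the same kind of
  // reduction as _young_gen.age_table()->merge(local_table) above.
  void merge(const WorkerAgeTable* other) {
    for (int i = 0; i < table_size; i++) { sizes[i] += other->sizes[i]; }
  }
};

int main() {
  WorkerAgeTable global, worker0, worker1;
  worker0.add(1, 128);        // worker 0: 128 words of age-1 survivors
  worker1.add(1, 64);         // worker 1: 64 words of age-1 survivors
  global.merge(&worker0);
  global.merge(&worker1);
  return global.sizes[1] == 192 ? 0 : 1;   // merged total: 192 words
}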
 494 ParScanClosure::ParScanClosure(ParNewGeneration* g,
 495                                ParScanThreadState* par_scan_state) :
 496   OopsInClassLoaderDataOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
 497   _boundary = _g->reserved().end();
 498 }
 499 
 500 void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
 501 void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
 502 
 503 void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
 504 void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
 505 
 506 void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
 507 void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
 508 
 509 void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
 510 void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
 511 
 512 ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
 513                                              ParScanThreadState* par_scan_state)
 514   : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
 515 {}
 516 
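All of the one-line do_oop bodies above (lines 500-510) forward to ParScanClosure::do_oop_work(p, gc_barrier, root_scan), so each concrete closure type is essentially a fixed choice of those two flags. Below is a rough self-contained sketch of that dispatch pattern; the class names are invented and do not reproduce the real HotSpot closure hierarchy.

#include <cstdio>

typedef void* oop_t;   // opaque stand-in for oop / narrowOop

class ScanClosureBase {
 public:
  virtual ~ScanClosureBase() {}
  virtual void do_oop(oop_t* p) = 0;

 protected:
  // Shared scan routine; each subclass fixes the two booleans at its single
  // call site, mirroring how the ParScan*Closure variants above select
  // gc_barrier / root_scan when calling do_oop_work.
  void do_oop_work(oop_t* p, bool gc_barrier, bool root_scan) {
    std::printf("scan %p  barrier=%d  root=%d\n", (void*)p, (int)gc_barrier, (int)root_scan);
  }
};

class ScanWithBarrier        : public ScanClosureBase { public: void do_oop(oop_t* p) { do_oop_work(p, true,  false); } };
class ScanWithoutBarrier     : public ScanClosureBase { public: void do_oop(oop_t* p) { do_oop_work(p, false, false); } };
class RootScanWithBarrier    : public ScanClosureBase { public: void do_oop(oop_t* p) { do_oop_work(p, true,  true);  } };
class RootScanWithoutBarrier : public ScanClosureBase { public: void do_oop(oop_t* p) { do_oop_work(p, false, true);  } };

int main() {
  oop_t slot = 0;
  RootScanWithBarrier closure;
  closure.do_oop(&slot);   // prints: scan 0x...  barrier=1  root=1
  return 0;
}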


 584                              StrongRootsScope* strong_roots_scope) :
 585     AbstractGangTask("ParNewGeneration collection"),
 586     _young_gen(young_gen), _old_gen(old_gen),
 587     _young_old_boundary(young_old_boundary),
 588     _state_set(state_set),
 589     _strong_roots_scope(strong_roots_scope)
 590 {}
 591 
 592 void ParNewGenTask::work(uint worker_id) {
 593   GenCollectedHeap* gch = GenCollectedHeap::heap();
 594   // Since this is being done in a separate thread, need new resource
 595   // and handle marks.
 596   ResourceMark rm;
 597   HandleMark hm;
 598 
 599   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 600   assert(_state_set->is_valid(worker_id), "Should not have been called");
 601 
 602   par_scan_state.set_young_old_boundary(_young_old_boundary);
 603 
 604   CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
 605                                   gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
 606 
 607   par_scan_state.start_strong_roots();
 608   gch->young_process_roots(_strong_roots_scope,
 609                            &par_scan_state.to_space_root_closure(),
 610                            &par_scan_state.older_gen_closure(),
 611                            &cld_scan_closure);
 612 
 613   par_scan_state.end_strong_roots();
 614 
 615   // "evacuate followers".
 616   par_scan_state.evacuate_followers_closure().do_void();
 617 
 618     // This will collapse this worker's promoted object list that's
 619     // created during the main parallel phase of ParNew. This has
 620     // to be called after all workers have finished promoting objects
 621     // and scanning promoted objects. It should be safe to call it from
 622     // here, given that we can only reach here after all threads have
 623     // offered termination, i.e., after there is no more work to be
 624     // done. It will also disable promotion tracking for the rest of
 625     // this GC, as it does not need to be on during reference processing.
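The change in the second hunk (old lines 604-608 versus new lines 604-605) replaces the Klass-based pair, a KlassScanClosure wrapped in a CLDToKlassAndOopClosure and driven by klass_rem_set(), with a single CLDScanClosure driven by cld_rem_set(): the "modified oops" state that tells the young GC which metadata to rescan is now queried per ClassLoaderData instead of per Klass. A rough self-contained sketch of that dirty-loader scan follows, under the assumption that a per-CLD dirty flag is skipped when clean and optionally remembered for a concurrent old-gen collection before being cleared; every name below is invented for illustration and is not the actual HotSpot API.

#include <cstdio>
#include <vector>

typedef void* oop_t;

struct OopVisitor {
  virtual ~OopVisitor() {}
  virtual void do_oop(oop_t* p) = 0;
};

// Stand-in for ClassLoaderData: the oops a loader owns plus a dirty flag that
// plays the role of the "modified oops" state kept by the CLD remembered set.
struct LoaderData {
  std::vector<oop_t> oops;
  bool modified;
  LoaderData() : modified(false) {}
};

// Illustrative analogue of a dirty-CLD scan: only loaders modified since the
// last collection are visited; when `accumulate_modified` is set, the dirty
// state is remembered for a concurrent old-gen collection before being cleared.
struct DirtyLoaderScanClosure {
  OopVisitor* scavenge;
  bool accumulate_modified;
  std::vector<LoaderData*> accumulated;

  DirtyLoaderScanClosure(OopVisitor* s, bool acc)
      : scavenge(s), accumulate_modified(acc) {}

  void do_cld(LoaderData* cld) {
    if (!cld->modified) {
      return;                        // clean loader: no new refs into young gen
    }
    if (accumulate_modified) {
      accumulated.push_back(cld);    // a concurrent collection still wants this
    }
    cld->modified = false;           // about to rescan everything it owns
    for (oop_t& slot : cld->oops) {
      scavenge->do_oop(&slot);
    }
  }
};

struct PrintVisitor : OopVisitor {
  void do_oop(oop_t* p) { std::printf("visit slot %p\n", (void*)p); }
};

int main() {
  LoaderData cld;
  cld.oops.push_back(0);
  cld.modified = true;

  PrintVisitor visitor;
  DirtyLoaderScanClosure scan(&visitor, /*accumulate_modified=*/false);
  scan.do_cld(&cld);                 // visits the slot and clears the dirty flag
  return 0;
}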

