< prev index next >

src/share/vm/gc/serial/defNewGeneration.cpp

Print this page




 104 }
 105 
 106 DefNewGeneration::FastEvacuateFollowersClosure::
 107 // Captures the heap and the two scan closures (non-barrier for the young
 108 // gen, barrier variant for older-gen refs — see the caller which passes
 109 // fsc_with_no_gc_barrier / fsc_with_gc_barrier) used to drain followers.
 110 FastEvacuateFollowersClosure(GenCollectedHeap* gch,
 111                              FastScanClosure* cur,
 112                              FastScanClosure* older) :
 113   _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
 114 {
 115   assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
 116   // The assert above guarantees the downcast is valid; prefer a greppable
 117   // static_cast over the original C-style cast.
 118   _young_gen = static_cast<DefNewGeneration*>(_gch->young_gen());
 119 }
 115 
 116 // Repeatedly scan objects allocated since the last save-marks until the
 117 // iteration reaches a fixed point (no allocations since save-marks), then
 118 // check that any promotion-failure scan has been fully drained.
 119 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
 120   for (;;) {
 121     _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
 122     if (_gch->no_allocs_since_save_marks()) {
 123       break;
 124     }
 125   }
 126   guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 127 }
 122 
 123 // Scan closure over the young generation; caches the end of the
 124 // generation's reserved region as the scan boundary.
 125 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
 126     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 127 {
 128   // Use the constructor argument directly; _g was just initialized to it.
 129   _boundary = g->reserved().end();
 130 }
 128 
 129 // Dispatch both oop widths to do_oop_work; the explicit ScanClosure::
 130 // qualification forces a non-virtual (statically bound) call.
 131 void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
 132 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
 131 
 132 // Fast variant of ScanClosure; likewise bounds the scan at the end of the
 133 // young generation's reserved region.
 134 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
 135     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 136 {
 137   // Use the constructor argument directly; _g was just initialized to it.
 138   _boundary = g->reserved().end();
 139 }
 137 
 138 // Dispatch both oop widths to do_oop_work; the explicit FastScanClosure::
 139 // qualification forces a non-virtual (statically bound) call.
 140 void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
 141 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 140 
 141 // Scavenge the oops of a single Klass, but only if it has been dirtied
 142 // (i.e. may hold references into the young gen) since the last collection.
 143 void KlassScanClosure::do_klass(Klass* klass) {
 144   NOT_PRODUCT(ResourceMark rm);
 145   log_develop_trace(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
 146                                   p2i(klass),
 147                                   klass->external_name(),
 148                                   klass->has_modified_oops() ? "true" : "false");
 149 
 150   // An undirtied klass holds no references into the young gen; skip it.
 151   if (!klass->has_modified_oops()) {
 152     return;
 153   }
 154 
 155   if (_accumulate_modified_oops) {
 156     klass->accumulate_modified_oops();
 157   }
 158 
 159   // Clear the dirty state up front since all the metadata is scavenged
 160   // below; the scavenge re-dirties the klass if young-gen refs remain.
 161   klass->clear_modified_oops();
 162 
 163   // Tell the closure which Klass is being scanned so that it can be
 164   // dirtied again if oops are left pointing into the young gen.
 165   _scavenge_closure->set_scanned_klass(klass);
 166   klass->oops_do(_scavenge_closure);
 167   _scavenge_closure->set_scanned_klass(NULL);
 168 }
 167 
 168 // Weak-reference scanning closure; records the young generation and the
 169 // end of its reserved region as the scan boundary.
 170 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
 171   _g(g)
 172 {
 173   // Use the constructor argument directly; _g was just initialized to it.
 174   _boundary = g->reserved().end();
 175 }
 173 
 174 // Dispatch both oop widths to do_oop_work; the explicit qualification
 175 // forces a non-virtual (statically bound) call.
 176 void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
 177 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
 176 
 177 // Dispatch both oop widths to do_oop_work; the explicit qualification
 178 // forces a non-virtual (statically bound) call.
 179 void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
 180 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 179 
 180 // Remembers the scavenge closure to apply to each dirty Klass, and latches
 181 // (once, at construction time) whether modified-oop state should be
 182 // accumulated, as queried from the klass remembered set.
 183 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
 184                                    KlassRemSet* klass_rem_set)
 185     : _scavenge_closure(scavenge_closure),
 186       _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
 184 
 185 
 186 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 187                                    size_t initial_size,
 188                                    const char* policy)
 189   : Generation(rs, initial_size),
 190     _preserved_marks_set(false /* in_c_heap */),
 191     _promo_failure_drain_in_progress(false),
 192     _should_allocate_from_space(false)
 193 {
 194   MemRegion cmr((HeapWord*)_virtual_space.low(),
 195                 (HeapWord*)_virtual_space.high());
 196   GenCollectedHeap* gch = GenCollectedHeap::heap();
 197 
 198   gch->barrier_set()->resize_covered_region(cmr);
 199 
 200   _eden_space = new ContiguousSpace();
 201   _from_space = new ContiguousSpace();
 202   _to_space   = new ContiguousSpace();
 203 
 204   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
 205     vm_exit_during_initialization("Could not allocate a new gen space");


 612   // These can be shared for all code paths
 613   IsAliveClosure is_alive(this);
 614   ScanWeakRefClosure scan_weak_ref(this);
 615 
 616   age_table()->clear();
 617   to()->clear(SpaceDecorator::Mangle);
 618   // The preserved marks should be empty at the start of the GC.
 619   _preserved_marks_set.init(1);
 620 
 621   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 622 
 623   assert(gch->no_allocs_since_save_marks(),
 624          "save marks have not been newly set.");
 625 
 626   // Not very pretty.
 627   CollectorPolicy* cp = gch->collector_policy();
 628 
 629   FastScanClosure fsc_with_no_gc_barrier(this, false);
 630   FastScanClosure fsc_with_gc_barrier(this, true);
 631 
 632   KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
 633                                       gch->rem_set()->klass_rem_set());
 634   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 635                                            &fsc_with_no_gc_barrier,
 636                                            false);
 637 
 638   set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
 639   FastEvacuateFollowersClosure evacuate_followers(gch,
 640                                                   &fsc_with_no_gc_barrier,
 641                                                   &fsc_with_gc_barrier);
 642 
 643   assert(gch->no_allocs_since_save_marks(),
 644          "save marks have not been newly set.");
 645 
 646   {
 647     // DefNew needs to run with n_threads == 0, to make sure the serial
 648     // version of the card table scanning code is used.
 649     // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
 650     StrongRootsScope srs(0);
 651 
 652     gch->young_process_roots(&srs,
 653                              &fsc_with_no_gc_barrier,
 654                              &fsc_with_gc_barrier,
 655                              &cld_scan_closure);




 104 }
 105 
 106 DefNewGeneration::FastEvacuateFollowersClosure::
 107 // Captures the heap and the two scan closures (non-barrier for the young
 108 // gen, barrier variant for older-gen refs) used to drain followers.
 109 FastEvacuateFollowersClosure(GenCollectedHeap* gch,
 110                              FastScanClosure* cur,
 111                              FastScanClosure* older) :
 112   _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
 113 {
 114   assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
 115   // The assert above guarantees the downcast is valid; prefer a greppable
 116   // static_cast over the original C-style cast.
 117   _young_gen = static_cast<DefNewGeneration*>(_gch->young_gen());
 118 }
 115 
 116 // Repeatedly scan objects allocated since the last save-marks until the
 117 // iteration reaches a fixed point (no allocations since save-marks), then
 118 // check that any promotion-failure scan has been fully drained.
 119 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
 120   for (;;) {
 121     _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
 122     if (_gch->no_allocs_since_save_marks()) {
 123       break;
 124     }
 125   }
 126   guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 127 }
 122 
 123 // Scan closure over the young generation; caches the end of the
 124 // generation's reserved region as the scan boundary.
 125 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
 126     OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 127 {
 128   // Use the constructor argument directly; _g was just initialized to it.
 129   _boundary = g->reserved().end();
 130 }
 128 
 129 // Dispatch both oop widths to do_oop_work; the explicit ScanClosure::
 130 // qualification forces a non-virtual (statically bound) call.
 131 void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
 132 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
 131 
 132 // Fast variant of ScanClosure; likewise bounds the scan at the end of the
 133 // young generation's reserved region.
 134 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
 135     OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 136 {
 137   // Use the constructor argument directly; _g was just initialized to it.
 138   _boundary = g->reserved().end();
 139 }
 137 
 138 // Dispatch both oop widths to do_oop_work; the explicit FastScanClosure::
 139 // qualification forces a non-virtual (statically bound) call.
 140 void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
 141 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 140 


























 141 
 142 // Weak-reference scanning closure; records the young generation and the
 143 // end of its reserved region as the scan boundary.
 144 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
 145   _g(g)
 146 {
 147   // Use the constructor argument directly; _g was just initialized to it.
 148   _boundary = g->reserved().end();
 149 }
 147 
 148 // Dispatch both oop widths to do_oop_work; the explicit qualification
 149 // forces a non-virtual (statically bound) call.
 150 void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
 151 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
 150 
 151 // Dispatch both oop widths to do_oop_work; the explicit qualification
 152 // forces a non-virtual (statically bound) call.
 153 void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
 154 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 153 






 154 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 155                                    size_t initial_size,
 156                                    const char* policy)
 157   : Generation(rs, initial_size),
 158     _preserved_marks_set(false /* in_c_heap */),
 159     _promo_failure_drain_in_progress(false),
 160     _should_allocate_from_space(false)
 161 {
 162   MemRegion cmr((HeapWord*)_virtual_space.low(),
 163                 (HeapWord*)_virtual_space.high());
 164   GenCollectedHeap* gch = GenCollectedHeap::heap();
 165 
 166   gch->barrier_set()->resize_covered_region(cmr);
 167 
 168   _eden_space = new ContiguousSpace();
 169   _from_space = new ContiguousSpace();
 170   _to_space   = new ContiguousSpace();
 171 
 172   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
 173     vm_exit_during_initialization("Could not allocate a new gen space");


 580   // These can be shared for all code paths
 581   IsAliveClosure is_alive(this);
 582   ScanWeakRefClosure scan_weak_ref(this);
 583 
 584   age_table()->clear();
 585   to()->clear(SpaceDecorator::Mangle);
 586   // The preserved marks should be empty at the start of the GC.
 587   _preserved_marks_set.init(1);
 588 
 589   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 590 
 591   assert(gch->no_allocs_since_save_marks(),
 592          "save marks have not been newly set.");
 593 
 594   // Not very pretty.
 595   CollectorPolicy* cp = gch->collector_policy();
 596 
 597   FastScanClosure fsc_with_no_gc_barrier(this, false);
 598   FastScanClosure fsc_with_gc_barrier(this, true);
 599 
 600   CLDToKlassAndOopClosure cld_scan_closure(&fsc_with_no_gc_barrier,



 601                                            false);
 602 
 603   set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
 604   FastEvacuateFollowersClosure evacuate_followers(gch,
 605                                                   &fsc_with_no_gc_barrier,
 606                                                   &fsc_with_gc_barrier);
 607 
 608   assert(gch->no_allocs_since_save_marks(),
 609          "save marks have not been newly set.");
 610 
 611   {
 612     // DefNew needs to run with n_threads == 0, to make sure the serial
 613     // version of the card table scanning code is used.
 614     // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
 615     StrongRootsScope srs(0);
 616 
 617     gch->young_process_roots(&srs,
 618                              &fsc_with_no_gc_barrier,
 619                              &fsc_with_gc_barrier,
 620                              &cld_scan_closure);


< prev index next >