
src/hotspot/share/gc/serial/defNewGeneration.cpp

 104 }
 105 
 106 DefNewGeneration::FastEvacuateFollowersClosure::
 107 FastEvacuateFollowersClosure(GenCollectedHeap* gch,
 108                              FastScanClosure* cur,
 109                              FastScanClosure* older) :
 110   _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
 111 {
 112   assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
 113   _young_gen = (DefNewGeneration*)_gch->young_gen();
 114 }
 115 
 116 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
 117   do {
 118     _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
 119   } while (!_gch->no_allocs_since_save_marks());
 120   guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 121 }
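
The do_void() loop above re-runs the since-save-marks scan until a pass completes without any new allocations: copying one object can itself copy (evacuate) the objects it references, so the scan has to reach a fixed point. The following minimal standalone sketch shows the same shape; ToySpace and its members are hypothetical stand-ins, not HotSpot APIs.

```c++
// Minimal standalone sketch of the fixed-point loop in
// FastEvacuateFollowersClosure::do_void(): each scan pass may "copy" more
// objects, so scanning repeats until no allocations have happened since the
// last save mark. ToySpace and its members are hypothetical, not JDK APIs.
#include <cstddef>
#include <iostream>
#include <vector>

struct ToySpace {
  std::vector<int> objects;   // objects copied into this space so far
  std::size_t save_mark = 0;  // index of the first object not yet scanned

  bool no_allocs_since_save_mark() const { return save_mark == objects.size(); }

  // Scan the objects that existed when the pass started; scanning may copy more.
  void oop_since_save_mark_iterate() {
    std::size_t limit = objects.size();
    while (save_mark < limit) {
      int obj = objects[save_mark++];
      if (obj > 0) {
        objects.push_back(obj - 1);   // evacuating a follower adds new work
      }
    }
  }
};

int main() {
  ToySpace to_space;
  to_space.objects = {3, 1};          // objects copied by the initial root scan

  // Same do/while shape as do_void(): iterate until a fixed point is reached.
  do {
    to_space.oop_since_save_mark_iterate();
  } while (!to_space.no_allocs_since_save_mark());

  std::cout << "scanned " << to_space.objects.size() << " objects\n";
  return 0;
}
```
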
 122 
 123 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
 124     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 125 {
 126   _boundary = _g->reserved().end();
 127 }
 128 
 129 void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
 130 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
 131 
 132 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
 133     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 134 {
 135   _boundary = _g->reserved().end();
 136 }
 137 
 138 void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
 139 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 140 
 141 void KlassScanClosure::do_klass(Klass* klass) {
 142   NOT_PRODUCT(ResourceMark rm);
 143   log_develop_trace(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
 144                                   p2i(klass),
 145                                   klass->external_name(),
 146                                   klass->has_modified_oops() ? "true" : "false");
 147 
 148   // If the klass has not been dirtied we know that there are
 149   // no references into the young gen and we can skip it.
 150   if (klass->has_modified_oops()) {
 151     if (_accumulate_modified_oops) {
 152       klass->accumulate_modified_oops();
 153     }
 154 
 155     // Clear this state since we're going to scavenge all the metadata.
 156     klass->clear_modified_oops();
 157 
 158     // Tell the closure which Klass is being scanned so that it can be dirtied
 159     // if oops are left pointing into the young gen.
 160     _scavenge_closure->set_scanned_klass(klass);
 161 
 162     klass->oops_do(_scavenge_closure);
 163 
 164     _scavenge_closure->set_scanned_klass(NULL);
 165   }
 166 }
 167 
 168 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
 169   _g(g)
 170 {
 171   _boundary = _g->reserved().end();
 172 }
 173 
 174 void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
 175 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
 176 
 177 void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
 178 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 179 
 180 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
 181                                    KlassRemSet* klass_rem_set)
 182     : _scavenge_closure(scavenge_closure),
 183       _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
 184 
 185 
 186 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 187                                    size_t initial_size,
 188                                    const char* policy)
 189   : Generation(rs, initial_size),
 190     _preserved_marks_set(false /* in_c_heap */),
 191     _promo_failure_drain_in_progress(false),
 192     _should_allocate_from_space(false)
 193 {
 194   MemRegion cmr((HeapWord*)_virtual_space.low(),
 195                 (HeapWord*)_virtual_space.high());
 196   GenCollectedHeap* gch = GenCollectedHeap::heap();
 197 
 198   gch->barrier_set()->resize_covered_region(cmr);
 199 
 200   _eden_space = new ContiguousSpace();
 201   _from_space = new ContiguousSpace();
 202   _to_space   = new ContiguousSpace();
 203 
 204   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
 205     vm_exit_during_initialization("Could not allocate a new gen space");


 612   // These can be shared for all code paths
 613   IsAliveClosure is_alive(this);
 614   ScanWeakRefClosure scan_weak_ref(this);
 615 
 616   age_table()->clear();
 617   to()->clear(SpaceDecorator::Mangle);
 618   // The preserved marks should be empty at the start of the GC.
 619   _preserved_marks_set.init(1);
 620 
 621   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 622 
 623   assert(gch->no_allocs_since_save_marks(),
 624          "save marks have not been newly set.");
 625 
 626   // Not very pretty.
 627   CollectorPolicy* cp = gch->collector_policy();
 628 
 629   FastScanClosure fsc_with_no_gc_barrier(this, false);
 630   FastScanClosure fsc_with_gc_barrier(this, true);
 631 
 632   KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
 633                                       gch->rem_set()->klass_rem_set());
 634   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 635                                            &fsc_with_no_gc_barrier,
 636                                            false);
 637 
 638   set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
 639   FastEvacuateFollowersClosure evacuate_followers(gch,
 640                                                   &fsc_with_no_gc_barrier,
 641                                                   &fsc_with_gc_barrier);
 642 
 643   assert(gch->no_allocs_since_save_marks(),
 644          "save marks have not been newly set.");
 645 
 646   {
 647     // DefNew needs to run with n_threads == 0, to make sure the serial
 648     // version of the card table scanning code is used.
 649     // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
 650     StrongRootsScope srs(0);
 651 
 652     gch->young_process_roots(&srs,
 653                              &fsc_with_no_gc_barrier,
 654                              &fsc_with_gc_barrier,
 655                              &cld_scan_closure);
 656   }




 104 }
 105 
 106 DefNewGeneration::FastEvacuateFollowersClosure::
 107 FastEvacuateFollowersClosure(GenCollectedHeap* gch,
 108                              FastScanClosure* cur,
 109                              FastScanClosure* older) :
 110   _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
 111 {
 112   assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
 113   _young_gen = (DefNewGeneration*)_gch->young_gen();
 114 }
 115 
 116 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
 117   do {
 118     _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
 119   } while (!_gch->no_allocs_since_save_marks());
 120   guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 121 }
 122 
 123 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
 124     OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 125 {
 126   _boundary = _g->reserved().end();
 127 }
 128 
 129 void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
 130 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
 131 
 132 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
 133     OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 134 {
 135   _boundary = _g->reserved().end();
 136 }
 137 
 138 void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
 139 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 140 
 141 void CLDScanClosure::do_cld(ClassLoaderData* cld) {
 142   NOT_PRODUCT(ResourceMark rm);
 143   log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s",
 144                                   p2i(cld),
 145                                   cld->loader_name(),
 146                                   cld->has_modified_oops() ? "true" : "false");
 147 
 148   // If the cld has not been dirtied we know that there are
 149   // no references into the young gen and we can skip it.
 150   if (cld->has_modified_oops()) {
 151     if (_accumulate_modified_oops) {
 152       cld->accumulate_modified_oops();
 153     }
 154 
 155     // Tell the closure which CLD is being scanned so that it can be dirtied
 156     // if oops are left pointing into the young gen.
 157     _scavenge_closure->set_scanned_cld(cld);
 158 
 159     // Clean the cld since we're going to scavenge all the metadata.
 160     cld->oops_do(_scavenge_closure, false, /*clear_modified_oops*/true);
 161 
 162     _scavenge_closure->set_scanned_cld(NULL);
 163   }
 164 }
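
The old KlassScanClosure::do_klass and the CLDScanClosure::do_cld that replaces it apply the same filter: a metadata holder is scanned for young-gen pointers only if its modified-oops flag has been set since the previous scavenge, and the flag is cleared as part of that scan (in the new code this is folded into cld->oops_do via the clear_modified_oops argument). A standalone sketch of that pattern, using the hypothetical names ToyCLD and scan_dirty_holders:

```c++
// Standalone sketch (not HotSpot code) of the dirty-flag filtering used by both
// KlassScanClosure::do_klass and CLDScanClosure::do_cld: only holders whose
// modified-oops flag was set since the last scavenge are scanned, and the flag
// is cleared as part of the scan. ToyCLD and scan_dirty_holders are hypothetical.
#include <iostream>
#include <vector>

struct ToyCLD {
  const char* name;
  bool has_modified_oops;   // set by the write barrier when an oop field is updated

  // Visit the holder's oop fields; optionally clear the dirty flag, mirroring
  // cld->oops_do(closure, false, /*clear_modified_oops*/ true) in the new code.
  void oops_do(bool clear_modified_oops) {
    std::cout << "scanning " << name << " for young-gen pointers\n";
    if (clear_modified_oops) {
      has_modified_oops = false;
    }
  }
};

void scan_dirty_holders(std::vector<ToyCLD>& clds) {
  for (ToyCLD& cld : clds) {
    // A holder that was never dirtied cannot hold references into the young gen.
    if (!cld.has_modified_oops) {
      continue;
    }
    cld.oops_do(/*clear_modified_oops=*/ true);
  }
}

int main() {
  std::vector<ToyCLD> clds = { {"boot", true}, {"platform", false}, {"app", true} };
  scan_dirty_holders(clds);   // only "boot" and "app" are scanned
  return 0;
}
```
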
 165 
 166 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
 167   _g(g)
 168 {
 169   _boundary = _g->reserved().end();
 170 }
 171 
 172 void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
 173 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
 174 
 175 void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
 176 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 177 
 178 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 179                                    size_t initial_size,
 180                                    const char* policy)
 181   : Generation(rs, initial_size),
 182     _preserved_marks_set(false /* in_c_heap */),
 183     _promo_failure_drain_in_progress(false),
 184     _should_allocate_from_space(false)
 185 {
 186   MemRegion cmr((HeapWord*)_virtual_space.low(),
 187                 (HeapWord*)_virtual_space.high());
 188   GenCollectedHeap* gch = GenCollectedHeap::heap();
 189 
 190   gch->barrier_set()->resize_covered_region(cmr);
 191 
 192   _eden_space = new ContiguousSpace();
 193   _from_space = new ContiguousSpace();
 194   _to_space   = new ContiguousSpace();
 195 
 196   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
 197     vm_exit_during_initialization("Could not allocate a new gen space");


 604   // These can be shared for all code paths
 605   IsAliveClosure is_alive(this);
 606   ScanWeakRefClosure scan_weak_ref(this);
 607 
 608   age_table()->clear();
 609   to()->clear(SpaceDecorator::Mangle);
 610   // The preserved marks should be empty at the start of the GC.
 611   _preserved_marks_set.init(1);
 612 
 613   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 614 
 615   assert(gch->no_allocs_since_save_marks(),
 616          "save marks have not been newly set.");
 617 
 618   // Not very pretty.
 619   CollectorPolicy* cp = gch->collector_policy();
 620 
 621   FastScanClosure fsc_with_no_gc_barrier(this, false);
 622   FastScanClosure fsc_with_gc_barrier(this, true);
 623 
 624   CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier,
 625                                   gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
 626 
 627   set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
 628   FastEvacuateFollowersClosure evacuate_followers(gch,
 629                                                   &fsc_with_no_gc_barrier,
 630                                                   &fsc_with_gc_barrier);
 631 
 632   assert(gch->no_allocs_since_save_marks(),
 633          "save marks have not been newly set.");
 634 
 635   {
 636     // DefNew needs to run with n_threads == 0, to make sure the serial
 637     // version of the card table scanning code is used.
 638     // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
 639     StrongRootsScope srs(0);
 640 
 641     gch->young_process_roots(&srs,
 642                              &fsc_with_no_gc_barrier,
 643                              &fsc_with_gc_barrier,
 644                              &cld_scan_closure);
 645   }
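
collect() builds two FastScanClosure instances: fsc_with_no_gc_barrier is applied to roots and to references found inside the young gen, while fsc_with_gc_barrier is applied to references found while scanning the old generation, which must remain visible to later scavenges through the remembered set. The standalone sketch below models that distinction; ToyHeap, scan_slot, the 512-byte card size and the addresses are illustrative assumptions, and the is_young() check before dirtying is a simplification of the card-table write the real barrier performs.

```c++
// Standalone sketch (not HotSpot code) of why collect() needs both a barrier and
// a no-barrier FastScanClosure: both copy a young referent, but only the closure
// used on old-gen references records the updated slot so a later scavenge can
// still find the old-to-young pointer. All names and constants are hypothetical.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct ToyHeap {
  std::uintptr_t young_start;
  std::uintptr_t young_end;     // old gen is assumed to start right at young_end
  std::vector<bool> cards;      // one "card" per 512-byte slice of the old gen

  bool is_young(std::uintptr_t addr) const {
    return addr >= young_start && addr < young_end;
  }

  void dirty_card_for(std::uintptr_t slot_addr) {
    std::size_t idx = (slot_addr - young_end) / 512;
    if (idx < cards.size()) {
      cards[idx] = true;
    }
  }
};

// gc_barrier mirrors the FastScanClosure constructor flag: true for old-gen refs.
void scan_slot(ToyHeap& heap, std::uintptr_t slot_addr, std::uintptr_t& referent,
               bool gc_barrier) {
  if (!heap.is_young(referent)) {
    return;                      // referent is already outside the young gen
  }
  referent += 0x1000;            // stand-in for copying the object and updating the slot
  if (gc_barrier && heap.is_young(referent)) {
    heap.dirty_card_for(slot_addr);   // old-gen slot still points into the young gen
  }
}

int main() {
  ToyHeap heap{0x10000, 0x20000, std::vector<bool>(64, false)};
  std::uintptr_t young_slot = 0x11000, young_ref = 0x12000;
  std::uintptr_t old_slot   = 0x21000, old_ref   = 0x13000;

  scan_slot(heap, young_slot, young_ref, /*gc_barrier=*/ false);  // young slot: no card work
  scan_slot(heap, old_slot,   old_ref,   /*gc_barrier=*/ true);   // old slot: card re-dirtied

  std::cout << "dirty cards: "
            << std::count(heap.cards.begin(), heap.cards.end(), true) << "\n";
  return 0;
}
```
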

