41 #include "gc/shared/strongRootsScope.hpp" 42 #include "memory/iterator.hpp" 43 #include "oops/instanceRefKlass.hpp" 44 #include "oops/oop.inline.hpp" 45 #include "runtime/atomic.inline.hpp" 46 #include "runtime/java.hpp" 47 #include "runtime/prefetch.inline.hpp" 48 #include "runtime/thread.inline.hpp" 49 #include "utilities/copy.hpp" 50 #include "utilities/globalDefinitions.hpp" 51 #include "utilities/stack.inline.hpp" 52 #if INCLUDE_ALL_GCS 53 #include "gc/cms/parOopClosures.hpp" 54 #endif 55 56 // 57 // DefNewGeneration functions. 58 59 // Methods of protected closure types. 60 61 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { 62 assert(g->level() == 0, "Optimized for youngest gen."); 63 } 64 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) { 65 return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded(); 66 } 67 68 DefNewGeneration::KeepAliveClosure:: 69 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) { 70 GenRemSet* rs = GenCollectedHeap::heap()->rem_set(); 71 _rs = (CardTableRS*)rs; 72 } 73 74 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } 75 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } 76 77 78 DefNewGeneration::FastKeepAliveClosure:: 79 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) : 80 DefNewGeneration::KeepAliveClosure(cl) { 81 _boundary = g->reserved().end(); 82 } 83 84 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } 85 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } 86 87 DefNewGeneration::EvacuateFollowersClosure:: 88 EvacuateFollowersClosure(GenCollectedHeap* gch, int level, 89 ScanClosure* cur, ScanClosure* older) : 90 _gch(gch), _level(level), 91 _scan_cur_or_nonheap(cur), _scan_older(older) 92 {} 93 94 void DefNewGeneration::EvacuateFollowersClosure::do_void() { 95 do { 96 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, 97 _scan_older); 98 } while (!_gch->no_allocs_since_save_marks(_level)); 99 } 100 101 DefNewGeneration::FastEvacuateFollowersClosure:: 102 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level, 103 DefNewGeneration* gen, 104 FastScanClosure* cur, FastScanClosure* older) : 105 _gch(gch), _level(level), _gen(gen), 106 _scan_cur_or_nonheap(cur), _scan_older(older) 107 {} 108 109 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { 110 do { 111 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, 112 _scan_older); 113 } while (!_gch->no_allocs_since_save_marks(_level)); 114 guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan"); 115 } 116 117 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : 118 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) 119 { 120 assert(_g->level() == 0, "Optimized for youngest generation"); 121 _boundary = _g->reserved().end(); 122 } 123 124 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); } 125 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); } 126 127 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : 128 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) 129 { 130 assert(_g->level() == 0, "Optimized for youngest generation"); 131 _boundary = _g->reserved().end(); 132 } 133 134 void FastScanClosure::do_oop(oop* p) { 
FastScanClosure::do_oop_work(p); } 135 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); } 136 137 void KlassScanClosure::do_klass(Klass* klass) { 138 #ifndef PRODUCT 139 if (TraceScavenge) { 140 ResourceMark rm; 141 gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s", 142 p2i(klass), 143 klass->external_name(), 144 klass->has_modified_oops() ? "true" : "false"); 145 } 146 #endif 147 148 // If the klass has not been dirtied we know that there's 149 // no references into the young gen and we can skip it. 150 if (klass->has_modified_oops()) { 151 if (_accumulate_modified_oops) { 152 klass->accumulate_modified_oops(); 153 } 154 155 // Clear this state since we're going to scavenge all the metadata. 156 klass->clear_modified_oops(); 157 158 // Tell the closure which Klass is being scanned so that it can be dirtied 159 // if oops are left pointing into the young gen. 160 _scavenge_closure->set_scanned_klass(klass); 161 162 klass->oops_do(_scavenge_closure); 163 164 _scavenge_closure->set_scanned_klass(NULL); 165 } 166 } 167 168 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) : 169 _g(g) 170 { 171 assert(_g->level() == 0, "Optimized for youngest generation"); 172 _boundary = _g->reserved().end(); 173 } 174 175 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); } 176 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); } 177 178 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); } 179 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); } 180 181 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure, 182 KlassRemSet* klass_rem_set) 183 : _scavenge_closure(scavenge_closure), 184 _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {} 185 186 187 DefNewGeneration::DefNewGeneration(ReservedSpace rs, 188 size_t initial_size, 189 int level, 190 const char* policy) 191 : Generation(rs, initial_size, level), 192 _promo_failure_drain_in_progress(false), 193 _should_allocate_from_space(false) 194 { 195 MemRegion cmr((HeapWord*)_virtual_space.low(), 196 (HeapWord*)_virtual_space.high()); 197 GenCollectedHeap* gch = GenCollectedHeap::heap(); 198 199 gch->barrier_set()->resize_covered_region(cmr); 200 201 _eden_space = new ContiguousSpace(); 202 _from_space = new ContiguousSpace(); 203 _to_space = new ContiguousSpace(); 204 205 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) 206 vm_exit_during_initialization("Could not allocate a new gen space"); 207 208 // Compute the maximum eden and survivor space sizes. These sizes 209 // are computed assuming the entire reserved space is committed. 210 // These values are exported as performance counters. 211 uintx alignment = gch->collector_policy()->space_alignment(); 355 } 356 357 // Do not attempt an expand-to-the reserve size. The 358 // request should properly observe the maximum size of 359 // the generation so an expand-to-reserve should be 360 // unnecessary. Also a second call to expand-to-reserve 361 // value potentially can cause an undue expansion. 362 // For example if the first expand fail for unknown reasons, 363 // but the second succeeds and expands the heap to its maximum 364 // value. 
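  // (When the GC locker is active, collection is disabled, so a failed
  // allocation is satisfied by expanding the heap instead; the message
  // below records that substitution.)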
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the following generation
  // (which is required to exist), so from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level == 1, "DefNewGeneration must be a young gen");

  Generation* old_gen = gch->old_gen();
  size_t old_size = old_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

[...]

  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableModRefBS::non_clean_card_iterate_possibly_parallel.
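    // (Passing 0 below is what selects that serial path.)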
    StrongRootsScope srs(0);

    gch->gen_process_roots(&srs,
                           _level,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           GenCollectedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
                           &fsc_with_no_gc_barrier,
                           &fsc_with_gc_barrier,
                           &cld_scan_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer, gc_tracer.gc_id());
  gc_tracer.report_gc_reference_stats(stats);

[...]

}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of

================================ Updated version ================================
(The same file after the change: the explicit generation "level" parameter is
removed, and the young generation is identified directly.)

#include "gc/shared/strongRootsScope.hpp"
#include "memory/iterator.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/parOopClosures.hpp"
#endif

//
// DefNewGeneration functions.

// Methods of protected closure types.
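// An object is considered alive if it lies outside the young generation's
// reserved region (and so is not subject to this scavenge), or if it has
// already been forwarded, i.e. reached and copied by the current scavenge.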
DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
  assert(_young_gen->kind() == Generation::ParNew ||
         _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}

bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch,
                         ScanClosure* cur,
                         ScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(Generation::Young));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                             FastScanClosure* cur,
                             FastScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{
  assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
  _gen = (DefNewGeneration*)_gch->young_gen();
}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks(Generation::Young));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
  if (TraceScavenge) {
    ResourceMark rm;
    gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
                           p2i(klass),
                           klass->external_name(),
                           klass->has_modified_oops() ? "true" : "false");
  }
#endif

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  gch->barrier_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = gch->collector_policy()->space_alignment();

[...]

  }

  // Do not attempt an expand to the reserve size. The
  // request should properly observe the maximum size of
  // the generation, so an expand to the reserve should be
  // unnecessary. Also, a second call to expand to the reserve
  // value can potentially cause an undue expansion: for example,
  // the first expand could fail for unknown reasons while the
  // second succeeds and expands the heap to its maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

[...]

  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(Generation::Young),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(Generation::Young),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableModRefBS::non_clean_card_iterate_possibly_parallel.
    StrongRootsScope srs(0);

    gch->gen_process_roots(&srs,
                           Generation::Young,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           GenCollectedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
                           &fsc_with_no_gc_barrier,
                           &fsc_with_gc_barrier,
                           &cld_scan_closure);
  }

  // "evacuate followers".
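  // (do_void() re-runs oop_since_save_marks_iterate over eden/to/from until
  // no new allocations have appeared since the last save-marks point, i.e.
  // until the transitive closure of the roots scanned above is evacuated.)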
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer, gc_tracer.gc_id());
  gc_tracer.report_gc_reference_stats(stats);

[...]

}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) {
    return;
  }
  assert(requestor == GenCollectedHeap::heap()->old_gen(), "We should not call our own generation");

  /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of