// ---------------------------------------------------------------------------
// NOTE(review): extraction artifact. This file interleaves TWO versions of
// what appears to be HotSpot's serial-GC DefNewGeneration implementation
// (names such as DefNewGeneration, SerialHeap, GenCollectedHeap point there
// -- TODO confirm against upstream), joined by a '|' separator further down,
// with the original files' line numbers fused into the text. It is NOT
// compilable as-is; the comments here only annotate the fragments. Recover
// the real source file before attempting any functional edit.
//
// Fragment 1 of version A (original lines 75-122):
//  - DefNewGeneration::KeepAliveClosure ctor: stores the weak-ref closure and
//    caches GenCollectedHeap::heap()->rem_set() in _rs.
//  - do_oop(oop*) / do_oop(narrowOop*): forwarders to do_oop_work.
//  - FastKeepAliveClosure ctor: delegates to KeepAliveClosure and records the
//    generation's reserved().end() as _boundary.
//  - FastEvacuateFollowersClosure ctor (this version takes FastScanClosure*
//    for both 'cur' and 'older') and do_void(), which repeats
//    oop_since_save_marks_iterate until no_allocs_since_save_marks(), then
//    guarantees promo_failure_scan_is_complete().
//  - FastScanClosure ctor: sets _boundary from the generation's reserved end.
//  - Start of CLDScanClosure::do_cld: logs the CLD and, per the inline
//    comment, skips it unless has_modified_oops() is set.
75 DefNewGeneration::KeepAliveClosure:: 76 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) { 77 _rs = GenCollectedHeap::heap()->rem_set(); 78 } 79 80 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } 81 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } 82 83 84 DefNewGeneration::FastKeepAliveClosure:: 85 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) : 86 DefNewGeneration::KeepAliveClosure(cl) { 87 _boundary = g->reserved().end(); 88 } 89 90 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } 91 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } 92 93 DefNewGeneration::FastEvacuateFollowersClosure:: 94 FastEvacuateFollowersClosure(SerialHeap* heap, 95 FastScanClosure* cur, 96 FastScanClosure* older) : 97 _heap(heap), _scan_cur_or_nonheap(cur), _scan_older(older) 98 { 99 } 100 101 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { 102 do { 103 _heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap, _scan_older); 104 } while (!_heap->no_allocs_since_save_marks()); 105 guarantee(_heap->young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan"); 106 } 107 108 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : 109 OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) 110 { 111 _boundary = _g->reserved().end(); 112 } 113 114 void CLDScanClosure::do_cld(ClassLoaderData* cld) { 115 NOT_PRODUCT(ResourceMark rm); 116 log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s", 117 p2i(cld), 118 cld->loader_name_and_id(), 119 cld->has_modified_oops() ? "true" : "false"); 120 121 // If the cld has not been dirtied we know that there's 122 // no references into the young gen and we can skip it. 
// NOTE(review): continuation of do_cld (accumulate modified oops, tell the
// scavenge closure which CLD is scanned, then oops_do with
// clear_modified_oops=true). do_cld is TRUNCATED here: the text jumps from
// original line 133 to line 553, an excerpt from the MIDDLE of a larger
// function whose signature is not visible (presumably
// DefNewGeneration::collect -- TODO confirm). The excerpt sets up is_alive /
// scan_weak_ref, clears the age table and to-space, initializes preserved
// marks, and builds the version-A closures (FastScanClosure with and without
// GC barrier, CLDScanClosure, FastEvacuateFollowersClosure).
123 if (cld->has_modified_oops()) { 124 if (_accumulate_modified_oops) { 125 cld->accumulate_modified_oops(); 126 } 127 128 // Tell the closure which CLD is being scanned so that it can be dirtied 129 // if oops are left pointing into the young gen. 130 _scavenge_closure->set_scanned_cld(cld); 131 132 // Clean the cld since we're going to scavenge all the metadata. 133 cld->oops_do(_scavenge_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true); 553 init_assuming_no_promotion_failure(); 554 555 GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, heap->gc_cause()); 556 557 heap->trace_heap_before_gc(&gc_tracer); 558 559 // These can be shared for all code paths 560 IsAliveClosure is_alive(this); 561 ScanWeakRefClosure scan_weak_ref(this); 562 563 age_table()->clear(); 564 to()->clear(SpaceDecorator::Mangle); 565 // The preserved marks should be empty at the start of the GC. 566 _preserved_marks_set.init(1); 567 568 heap->rem_set()->prepare_for_younger_refs_iterate(false); 569 570 assert(heap->no_allocs_since_save_marks(), 571 "save marks have not been newly set."); 572 573 FastScanClosure fsc_with_no_gc_barrier(this, false); 574 FastScanClosure fsc_with_gc_barrier(this, true); 575 576 CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier, 577 heap->rem_set()->cld_rem_set()->accumulate_modified_oops()); 578 579 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); 580 FastEvacuateFollowersClosure evacuate_followers(heap, 581 &fsc_with_no_gc_barrier, 582 &fsc_with_gc_barrier); 583 584 assert(heap->no_allocs_since_save_marks(), 585 "save marks have not been newly set."); 586 587 { 588 // DefNew needs to run with n_threads == 0, to make sure the serial 589 // version of the card table scanning code is used. 590 // See: CardTableRS::non_clean_card_iterate_possibly_parallel. 
// NOTE(review): tail of the version-A mid-function excerpt: root processing
// under a StrongRootsScope(0) (serial card-table scan, per the comment on the
// preceding lines), evacuate_followers.do_void(), reference processing via
// ReferenceProcessor::process_discovered_references, GC-tracer reporting, and
// WeakProcessor::weak_oops_do. After the '|' separator a SECOND version of
// the same region begins (its KeepAliveClosure / FastKeepAliveClosure code is
// textually identical to version A). In version B the
// FastEvacuateFollowersClosure constructor takes DefNewScanClosure* /
// DefNewYoungerGenClosure* instead of two FastScanClosure*, and no
// FastScanClosure constructor appears in the visible span -- this reads like
// a before/after pair from a closure-refactoring change (TODO: confirm
// against upstream history). The fragment ends mid-definition at "void",
// which line 4 completes as FastEvacuateFollowersClosure::do_void.
591 StrongRootsScope srs(0); 592 593 heap->young_process_roots(&srs, 594 &fsc_with_no_gc_barrier, 595 &fsc_with_gc_barrier, 596 &cld_scan_closure); 597 } 598 599 // "evacuate followers". 600 evacuate_followers.do_void(); 601 602 FastKeepAliveClosure keep_alive(this, &scan_weak_ref); 603 ReferenceProcessor* rp = ref_processor(); 604 rp->setup_policy(clear_all_soft_refs); 605 ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues()); 606 const ReferenceProcessorStats& stats = 607 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, 608 NULL, &pt); 609 gc_tracer.report_gc_reference_stats(stats); 610 gc_tracer.report_tenuring_threshold(tenuring_threshold()); 611 pt.print_all_references(); 612 613 assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set."); 614 615 WeakProcessor::weak_oops_do(&is_alive, &keep_alive); | 75 DefNewGeneration::KeepAliveClosure:: 76 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) { 77 _rs = GenCollectedHeap::heap()->rem_set(); 78 } 79 80 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } 81 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } 82 83 84 DefNewGeneration::FastKeepAliveClosure:: 85 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) : 86 DefNewGeneration::KeepAliveClosure(cl) { 87 _boundary = g->reserved().end(); 88 } 89 90 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } 91 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } 92 93 DefNewGeneration::FastEvacuateFollowersClosure:: 94 FastEvacuateFollowersClosure(SerialHeap* heap, 95 DefNewScanClosure* cur, 96 DefNewYoungerGenClosure* older) : 97 _heap(heap), _scan_cur_or_nonheap(cur), _scan_older(older) 98 { 99 } 100 101 void 
// NOTE(review): remainder of version B (see the '|' separator on the previous
// line). It contains: FastEvacuateFollowersClosure::do_void (body textually
// identical to version A's), CLDScanClosure::do_cld (body textually identical
// to version A's, renumbered to original lines 108-127 and again TRUNCATED
// mid-body), then a jump to original line 547 -- the same mid-function
// excerpt as version A (signature not visible; presumably
// DefNewGeneration::collect, TODO confirm). Differences from version A in
// this excerpt: the roots/followers closures are DefNewScanClosure
// scan_closure(this) and DefNewYoungerGenClosure younger_gen_closure(this,
// _old_gen) instead of the two FastScanClosure instances, and those are what
// get passed to CLDScanClosure, set_promo_failure_scan_stack_closure,
// FastEvacuateFollowersClosure, and young_process_roots.
DefNewGeneration::FastEvacuateFollowersClosure::do_void() { 102 do { 103 _heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap, _scan_older); 104 } while (!_heap->no_allocs_since_save_marks()); 105 guarantee(_heap->young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan"); 106 } 107 108 void CLDScanClosure::do_cld(ClassLoaderData* cld) { 109 NOT_PRODUCT(ResourceMark rm); 110 log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s", 111 p2i(cld), 112 cld->loader_name_and_id(), 113 cld->has_modified_oops() ? "true" : "false"); 114 115 // If the cld has not been dirtied we know that there's 116 // no references into the young gen and we can skip it. 117 if (cld->has_modified_oops()) { 118 if (_accumulate_modified_oops) { 119 cld->accumulate_modified_oops(); 120 } 121 122 // Tell the closure which CLD is being scanned so that it can be dirtied 123 // if oops are left pointing into the young gen. 124 _scavenge_closure->set_scanned_cld(cld); 125 126 // Clean the cld since we're going to scavenge all the metadata. 127 cld->oops_do(_scavenge_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true); 547 init_assuming_no_promotion_failure(); 548 549 GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, heap->gc_cause()); 550 551 heap->trace_heap_before_gc(&gc_tracer); 552 553 // These can be shared for all code paths 554 IsAliveClosure is_alive(this); 555 ScanWeakRefClosure scan_weak_ref(this); 556 557 age_table()->clear(); 558 to()->clear(SpaceDecorator::Mangle); 559 // The preserved marks should be empty at the start of the GC. 
// NOTE(review): version-B excerpt continues; it ends mid-function at
// WeakProcessor::weak_oops_do, followed by a trailing '|' separator -- the
// chunk is cut off here, so the rest of the function is not visible.
560 _preserved_marks_set.init(1); 561 562 heap->rem_set()->prepare_for_younger_refs_iterate(false); 563 564 assert(heap->no_allocs_since_save_marks(), 565 "save marks have not been newly set."); 566 567 DefNewScanClosure scan_closure(this); 568 DefNewYoungerGenClosure younger_gen_closure(this, _old_gen); 569 570 CLDScanClosure cld_scan_closure(&scan_closure, 571 heap->rem_set()->cld_rem_set()->accumulate_modified_oops()); 572 573 set_promo_failure_scan_stack_closure(&scan_closure); 574 FastEvacuateFollowersClosure evacuate_followers(heap, 575 &scan_closure, 576 &younger_gen_closure); 577 578 assert(heap->no_allocs_since_save_marks(), 579 "save marks have not been newly set."); 580 581 { 582 // DefNew needs to run with n_threads == 0, to make sure the serial 583 // version of the card table scanning code is used. 584 // See: CardTableRS::non_clean_card_iterate_possibly_parallel. 585 StrongRootsScope srs(0); 586 587 heap->young_process_roots(&srs, 588 &scan_closure, 589 &younger_gen_closure, 590 &cld_scan_closure); 591 } 592 593 // "evacuate followers". 594 evacuate_followers.do_void(); 595 596 FastKeepAliveClosure keep_alive(this, &scan_weak_ref); 597 ReferenceProcessor* rp = ref_processor(); 598 rp->setup_policy(clear_all_soft_refs); 599 ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues()); 600 const ReferenceProcessorStats& stats = 601 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, 602 NULL, &pt); 603 gc_tracer.report_gc_reference_stats(stats); 604 gc_tracer.report_tenuring_threshold(tenuring_threshold()); 605 pt.print_all_references(); 606 607 assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set."); 608 609 WeakProcessor::weak_oops_do(&is_alive, &keep_alive); |