src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

Print this page




3822   }
3823 #endif // !ASSERT
3824 }
3825 
// Accumulates one GC worker's per-region counts of surviving young-gen words
// (surv_young_words) into the heap-wide _surviving_young_words totals;
// one array slot per region in the young collection set.
3826 void
3827 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3828   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); // serialize concurrent updaters; lock taken without a safepoint check
3829   uint array_length = g1_policy()->young_cset_region_length();
3830   for (uint i = 0; i < array_length; ++i) {
3831     _surviving_young_words[i] += surv_young_words[i];
3832   }
3833 }
3834 
// Releases the C-heap array backing _surviving_young_words and clears the
// pointer. Must only be called while the array is allocated (see guarantee).
3835 void
3836 G1CollectedHeap::cleanup_surviving_young_words() {
3837   guarantee( _surviving_young_words != NULL, "pre-condition" );
3838   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC); // freed with the mtGC memory tag it was allocated under
3839   _surviving_young_words = NULL; // avoid a dangling pointer / accidental double free
3840 }
3841 










3842 #ifdef ASSERT
// Debug-only (ASSERT builds) closure applied to each collection-set region
// to verify that its remembered set is ready for parallel iteration before
// the evacuation phase starts.
3843 class VerifyCSetClosure: public HeapRegionClosure {
3844 public:
3845   bool doHeapRegion(HeapRegion* hr) {
3846     // Here we check that the CSet region's RSet is ready for parallel
3847     // iteration. The fields that we'll verify are only manipulated
3848     // when the region is part of a CSet and is collected. Afterwards,
3849     // we reset these fields when we clear the region's RSet (when the
3850     // region is freed) so they are ready when the region is
3851     // re-allocated. The only exception to this is if there's an
3852     // evacuation failure and instead of freeing the region we leave
3853     // it in the heap. In that case, we reset these fields during
3854     // evacuation failure handling.
3855     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3856 
3857     // Here's a good place to add any other checks we'd like to
3858     // perform on CSet regions.
3859     return false; // false: do not abort, keep visiting the remaining CSet regions
3860   }
3861 };


3998     // get entries from the secondary_free_list.
3999     if (!G1StressConcRegionFreeing) {
4000       append_secondary_free_list_if_not_empty_with_lock();
4001     }
4002 
4003     assert(check_young_list_well_formed(), "young list should be well formed");
4004     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
4005            "sanity check");
4006 
4007     // Don't dynamically change the number of GC threads this early.  A value of
4008     // 0 is used to indicate serial work.  When parallel work is done,
4009     // it will be set.
4010 
4011     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
4012       IsGCActiveMark x;
4013 
4014       gc_prologue(false);
4015       increment_total_collections(false /* full gc */);
4016       increment_gc_time_stamp();
4017 








4018       verify_before_gc();
4019       check_bitmaps("GC Start");
4020 
4021       COMPILER2_PRESENT(DerivedPointerTable::clear());
4022 
4023       // Please see comment in g1CollectedHeap.hpp and
4024       // G1CollectedHeap::ref_processing_init() to see how
4025       // reference processing currently works in G1.
4026 
4027       // Enable discovery in the STW reference processor
4028       ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
4029                                             true /*verify_no_refs*/);
4030 
4031       {
4032         // We want to temporarily turn off discovery by the
4033         // CM ref processor, if necessary, and turn it back on
4034         // on again later if we do. Using a scoped
4035         // NoRefDiscovery object will do this.
4036         NoRefDiscovery no_cm_discovery(ref_processor_cm());
4037 


4229         // the update buffers we'll probably need to scan cards on the
4230         // regions we just allocated to (i.e., the GC alloc
4231         // regions). However, during the last GC we called
4232         // set_saved_mark() on all the GC alloc regions, so card
4233         // scanning might skip the [saved_mark_word()...top()] area of
4234         // those regions (i.e., the area we allocated objects into
4235         // during the last GC). But it shouldn't. Given that
4236         // saved_mark_word() is conditional on whether the GC time stamp
4237         // on the region is current or not, by incrementing the GC time
4238         // stamp here we invalidate all the GC time stamps on all the
4239         // regions and saved_mark_word() will simply return top() for
4240         // all the regions. This is a nicer way of ensuring this rather
4241         // than iterating over the regions and fixing them. In fact, the
4242         // GC time stamp increment here also ensures that
4243         // saved_mark_word() will return top() between pauses, i.e.,
4244         // during concurrent refinement. So we don't need the
4245         // is_gc_active() check to decided which top to use when
4246         // scanning cards (see CR 7039627).
4247         increment_gc_time_stamp();
4248 








4249         verify_after_gc();
4250         check_bitmaps("GC End");
4251 
4252         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4253         ref_processor_stw()->verify_no_references_recorded();
4254 
4255         // CM reference discovery will be re-enabled if necessary.
4256       }
4257 
4258       // We should do this after we potentially expand the heap so
4259       // that all the COMMIT events are generated before the end GC
4260       // event, and after we retire the GC alloc regions so that all
4261       // RETIRE events are generated before the end GC event.
4262       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4263 
4264 #ifdef TRACESPINNING
4265       ParallelTaskTerminator::print_termination_counts();
4266 #endif
4267 
4268       gc_epilogue(false);




3822   }
3823 #endif // !ASSERT
3824 }
3825 
// Accumulates one GC worker's per-region counts of surviving young-gen words
// (surv_young_words) into the heap-wide _surviving_young_words totals;
// one array slot per region in the young collection set.
3826 void
3827 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3828   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); // serialize concurrent updaters; lock taken without a safepoint check
3829   uint array_length = g1_policy()->young_cset_region_length();
3830   for (uint i = 0; i < array_length; ++i) {
3831     _surviving_young_words[i] += surv_young_words[i];
3832   }
3833 }
3834 
// Releases the C-heap array backing _surviving_young_words and clears the
// pointer. Must only be called while the array is allocated (see guarantee).
3835 void
3836 G1CollectedHeap::cleanup_surviving_young_words() {
3837   guarantee( _surviving_young_words != NULL, "pre-condition" );
3838   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC); // freed with the mtGC memory tag it was allocated under
3839   _surviving_young_words = NULL; // avoid a dangling pointer / accidental double free
3840 }
3841 
// Closure that verifies the remembered set of every heap region it visits.
// Used under -XX:+VerifyRememberedSets before and after a GC pause.
3842 class VerifyRegionRemSetClosure : public HeapRegionClosure {
3843   public:
3844     bool doHeapRegion(HeapRegion* hr) {
3845       if (!hr->continuesHumongous()) { // skip humongous continuation regions -- presumably covered via the humongous start region; confirm
3846         hr->verify_rem_set();
3847       }
3848       return false; // false: do not abort, iterate over all regions
3849     }
3850 };
3851 
3852 #ifdef ASSERT
// Debug-only (ASSERT builds) closure applied to each collection-set region
// to verify that its remembered set is ready for parallel iteration before
// the evacuation phase starts.
3853 class VerifyCSetClosure: public HeapRegionClosure {
3854 public:
3855   bool doHeapRegion(HeapRegion* hr) {
3856     // Here we check that the CSet region's RSet is ready for parallel
3857     // iteration. The fields that we'll verify are only manipulated
3858     // when the region is part of a CSet and is collected. Afterwards,
3859     // we reset these fields when we clear the region's RSet (when the
3860     // region is freed) so they are ready when the region is
3861     // re-allocated. The only exception to this is if there's an
3862     // evacuation failure and instead of freeing the region we leave
3863     // it in the heap. In that case, we reset these fields during
3864     // evacuation failure handling.
3865     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3866 
3867     // Here's a good place to add any other checks we'd like to
3868     // perform on CSet regions.
3869     return false; // false: do not abort, keep visiting the remaining CSet regions
3870   }
3871 };


4008     // get entries from the secondary_free_list.
4009     if (!G1StressConcRegionFreeing) {
4010       append_secondary_free_list_if_not_empty_with_lock();
4011     }
4012 
4013     assert(check_young_list_well_formed(), "young list should be well formed");
4014     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
4015            "sanity check");
4016 
4017     // Don't dynamically change the number of GC threads this early.  A value of
4018     // 0 is used to indicate serial work.  When parallel work is done,
4019     // it will be set.
4020 
4021     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
4022       IsGCActiveMark x;
4023 
4024       gc_prologue(false);
4025       increment_total_collections(false /* full gc */);
4026       increment_gc_time_stamp();
4027 
4028       if (VerifyRememberedSets) {
4029         if (PrintGCDetails) {
4030           gclog_or_tty->print_cr("[Verifying RemSets before GC]");
4031         }
4032         VerifyRegionRemSetClosure v_cl;
4033         heap_region_iterate(&v_cl);
4034       }
4035 
4036       verify_before_gc();
4037       check_bitmaps("GC Start");
4038 
4039       COMPILER2_PRESENT(DerivedPointerTable::clear());
4040 
4041       // Please see comment in g1CollectedHeap.hpp and
4042       // G1CollectedHeap::ref_processing_init() to see how
4043       // reference processing currently works in G1.
4044 
4045       // Enable discovery in the STW reference processor
4046       ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
4047                                             true /*verify_no_refs*/);
4048 
4049       {
4050         // We want to temporarily turn off discovery by the
4051         // CM ref processor, if necessary, and turn it back on
4052         // on again later if we do. Using a scoped
4053         // NoRefDiscovery object will do this.
4054         NoRefDiscovery no_cm_discovery(ref_processor_cm());
4055 


4247         // the update buffers we'll probably need to scan cards on the
4248         // regions we just allocated to (i.e., the GC alloc
4249         // regions). However, during the last GC we called
4250         // set_saved_mark() on all the GC alloc regions, so card
4251         // scanning might skip the [saved_mark_word()...top()] area of
4252         // those regions (i.e., the area we allocated objects into
4253         // during the last GC). But it shouldn't. Given that
4254         // saved_mark_word() is conditional on whether the GC time stamp
4255         // on the region is current or not, by incrementing the GC time
4256         // stamp here we invalidate all the GC time stamps on all the
4257         // regions and saved_mark_word() will simply return top() for
4258         // all the regions. This is a nicer way of ensuring this rather
4259         // than iterating over the regions and fixing them. In fact, the
4260         // GC time stamp increment here also ensures that
4261         // saved_mark_word() will return top() between pauses, i.e.,
4262         // during concurrent refinement. So we don't need the
4263         // is_gc_active() check to decided which top to use when
4264         // scanning cards (see CR 7039627).
4265         increment_gc_time_stamp();
4266 
4267         if (VerifyRememberedSets) {
4268           if (PrintGCDetails) {
4269             gclog_or_tty->print_cr("[Verifying RemSets after GC]");
4270           }
4271           VerifyRegionRemSetClosure v_cl;
4272           heap_region_iterate(&v_cl);
4273         }
4274 
4275         verify_after_gc();
4276         check_bitmaps("GC End");
4277 
4278         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4279         ref_processor_stw()->verify_no_references_recorded();
4280 
4281         // CM reference discovery will be re-enabled if necessary.
4282       }
4283 
4284       // We should do this after we potentially expand the heap so
4285       // that all the COMMIT events are generated before the end GC
4286       // event, and after we retire the GC alloc regions so that all
4287       // RETIRE events are generated before the end GC event.
4288       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4289 
4290 #ifdef TRACESPINNING
4291       ParallelTaskTerminator::print_termination_counts();
4292 #endif
4293 
4294       gc_epilogue(false);