src/share/vm/gc/g1/g1CollectedHeap.cpp

  if (!G1EagerReclaimHumongousObjects) {
    g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
    return;
  }
  double time = os::elapsed_counter();

  // Collect reclaim candidate information and register candidates with cset.
  RegisterHumongousWithInCSetFastTestClosure cl;
  heap_region_iterate(&cl);

  time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
  g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
                                                                  cl.total_humongous(),
                                                                  cl.candidate_humongous());
  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;

  // Finally flush all remembered set entries to re-check into the global DCQS.
  cl.flush_rem_set_entries();
}
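
The timing above follows HotSpot's raw-tick pattern: os::elapsed_counter() returns ticks of a free-running counter and os::elapsed_frequency() its rate in ticks per second, so the delta must be scaled to milliseconds explicitly. A minimal sketch of the same pattern in isolation (do_timed_work() is a hypothetical placeholder):

  double start = os::elapsed_counter();
  do_timed_work();  // hypothetical phase being timed
  double elapsed_ms = ((double)(os::elapsed_counter() - start) / os::elapsed_frequency()) * 1000.0;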

// Verify each region's remembered set. Continuation regions of humongous
// objects are skipped (their entries are tracked on the corresponding
// "starts humongous" region), as are archive regions, which are never
// collected.
class VerifyRegionRemSetClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* hr) {
    if (!hr->is_archive() && !hr->is_continues_humongous()) {
      hr->verify_rem_set();
    }
    return false;
  }
};

#ifdef ASSERT
class VerifyCSetClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* hr) {
    // Here we check that the CSet region's RSet is ready for parallel
    // iteration. The fields that we'll verify are only manipulated
    // when the region is part of a CSet and is collected. Afterwards,
    // we reset these fields when we clear the region's RSet (when the
    // region is freed) so they are ready when the region is
    // re-allocated. The only exception to this is if there's an
    // evacuation failure and instead of freeing the region we leave
    // it in the heap. In that case, we reset these fields during
    // evacuation failure handling.
    guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");

    // Here's a good place to add any other checks we'd like to
    // perform on CSet regions.
    return false;
  }
};
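
In debug builds a closure like this is driven over the collection set just before evacuation; a minimal sketch of such a call site, assuming the collection_set_iterate() helper on G1CollectedHeap (placement here is hypothetical):

#ifdef ASSERT
  VerifyCSetClosure cset_cl;
  collection_set_iterate(&cset_cl);
#endif // ASSERT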


    // and wait if necessary. If the G1StressConcRegionFreeing flag is
    // set, skip this step so that the region allocation code has to
    // get entries from the secondary_free_list.
    if (!G1StressConcRegionFreeing) {
      append_secondary_free_list_if_not_empty_with_lock();
    }

    assert(check_young_list_well_formed(), "young list should be well formed");

    // Don't dynamically change the number of GC threads this early. A value of
    // 0 is used to indicate serial work. When parallel work is done,
    // it will be set.

    { // Call to jvmpi::post_class_unload_events must occur outside of active GC
      IsGCActiveMark x;

      gc_prologue(false);
      increment_total_collections(false /* full gc */);
      increment_gc_time_stamp();

      if (VerifyRememberedSets) {
        log_info(gc, verify)("[Verifying RemSets before GC]");
        VerifyRegionRemSetClosure v_cl;
        heap_region_iterate(&v_cl);
      }
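      // Note: the log_info(gc, verify) output above is emitted only when
      // unified logging enables those tags, e.g. -Xlog:gc+verify.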

      verify_before_gc();

      check_bitmaps("GC Start");

#if defined(COMPILER2) || INCLUDE_JVMCI
      DerivedPointerTable::clear();
#endif

      // See the comment in g1CollectedHeap.hpp and
      // G1CollectedHeap::ref_processing_init() for how
      // reference processing currently works in G1.

      // Enable discovery in the STW reference processor
      if (g1_policy()->should_process_references()) {
        ref_processor_stw()->enable_discovery();
      } else {
        ref_processor_stw()->disable_discovery();
      }

      {


        // the update buffers we'll probably need to scan cards on the
        // regions we just allocated to (i.e., the GC alloc
        // regions). However, during the last GC we called
        // set_saved_mark() on all the GC alloc regions, so card
        // scanning might skip the [saved_mark_word()...top()] area of
        // those regions (i.e., the area we allocated objects into
        // during the last GC). But it shouldn't. Given that
        // saved_mark_word() is conditional on whether the GC time stamp
        // on the region is current or not, by incrementing the GC time
        // stamp here we invalidate all the GC time stamps on all the
        // regions and saved_mark_word() will simply return top() for
        // all the regions. This is a nicer way of ensuring this rather
        // than iterating over the regions and fixing them. In fact, the
        // GC time stamp increment here also ensures that
        // saved_mark_word() will return top() between pauses, i.e.,
        // during concurrent refinement. So we don't need the
        // is_gc_active() check to decide which top to use when
        // scanning cards (see CR 7039627).
        increment_gc_time_stamp();

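        // Illustrative sketch (simplified and hypothetical, not the actual
        // HeapRegion code) of the time-stamp-conditional saved mark the
        // comment above describes: once the heap-wide stamp has moved past
        // a region's stamp, the saved mark is stale and top() is returned.
        //
        //   HeapWord* saved_mark_word() const {
        //     if (_gc_time_stamp < _g1h->get_gc_time_stamp()) {
        //       return top();            // stamp stale: scan up to top()
        //     }
        //     return _saved_mark_word;   // stamp current: honor saved mark
        //   }
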
        if (VerifyRememberedSets) {
          log_info(gc, verify)("[Verifying RemSets after GC]");
          VerifyRegionRemSetClosure v_cl;
          heap_region_iterate(&v_cl);
        }

        verify_after_gc();
        check_bitmaps("GC End");

        assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
        ref_processor_stw()->verify_no_references_recorded();

        // CM reference discovery will be re-enabled if necessary.
      }

#ifdef TRACESPINNING
      ParallelTaskTerminator::print_termination_counts();
#endif

      gc_epilogue(false);
    }

    // Print the remainder of the GC log output.
    log_gc_footer(os::elapsed_counter() - pause_start_counter);

    // It is not yet safe to tell the concurrent mark to