< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page




2562   // Fill TLAB's and such
2563   double start = os::elapsedTime();
2564   accumulate_statistics_all_tlabs();
2565   ensure_parsability(true);
2566   g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2567 }
2568 
// Post-GC bookkeeping shared by young and full collections: bump the
// completed old-marking-cycle count (full GCs only), emit the periodic
// remembered-set summary, resize TLABs (timed into the pause's phase
// times), update allocation-context and memory-service statistics, and
// refresh the heap-occupancy info consumed by the soft-reference policy.
// 'full' is true when this epilogue follows a full collection.
2569 void G1CollectedHeap::gc_epilogue(bool full) {
2570   // Update common counters.
2571   if (full) {
2572     // Update the number of full collections that have been completed.
2573     increment_old_marking_cycles_completed(false /* concurrent */);
2574   }
2575 
2576   // We are at the end of the GC. Total collections has already been increased.
2577   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2578 
2579   // FIXME: what is this about?
2580   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2581   // is set.
     // The DerivedPointerTable is only maintained when C2 or JVMCI compiled
     // code can exist; by the end of the GC it must have been drained.
2582 #if defined(COMPILER2) || INCLUDE_JVMCI
2583   assert(DerivedPointerTable::is_empty(), "derived pointer present");
2584 #endif
2585   // always_do_update_barrier = true;
2586 
     // Time TLAB resizing and record it (in ms) in this pause's phase times.
2587   double start = os::elapsedTime();
2588   resize_all_tlabs();
2589   g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2590 
2591   allocation_context_stats().update(full);
2592 
2593   MemoryService::track_memory_usage();
2594   // We have just completed a GC. Update the soft reference
2595   // policy with the new heap occupancy
2596   Universe::update_heap_info_at_gc();
2597 }
2598 
2599 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2600                                                uint gc_count_before,
2601                                                bool* succeeded,
2602                                                GCCause::Cause gc_cause) {


2975 
2976     // Don't dynamically change the number of GC threads this early.  A value of
2977     // 0 is used to indicate serial work.  When parallel work is done,
2978     // it will be set.
2979 
2980     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2981       IsGCActiveMark x;
2982 
2983       gc_prologue(false);
2984 
2985       if (VerifyRememberedSets) {
2986         log_info(gc, verify)("[Verifying RemSets before GC]");
2987         VerifyRegionRemSetClosure v_cl;
2988         heap_region_iterate(&v_cl);
2989       }
2990 
2991       _verifier->verify_before_gc();
2992 
2993       _verifier->check_bitmaps("GC Start");
2994 
2995 #if defined(COMPILER2) || INCLUDE_JVMCI
2996       DerivedPointerTable::clear();
2997 #endif
2998 
2999       // Please see comment in g1CollectedHeap.hpp and
3000       // G1CollectedHeap::ref_processing_init() to see how
3001       // reference processing currently works in G1.
3002 
3003       // Enable discovery in the STW reference processor
3004       if (g1_policy()->should_process_references()) {
3005         ref_processor_stw()->enable_discovery();
3006       } else {
3007         ref_processor_stw()->disable_discovery();
3008       }
3009 
3010       {
3011         // We want to temporarily turn off discovery by the
3012         // CM ref processor, if necessary, and turn it back on
3013         // again later if we do. Using a scoped
3014         // NoRefDiscovery object will do this.
3015         NoRefDiscovery no_cm_discovery(ref_processor_cm());


4404   // RSets.
4405   if (g1_policy()->should_process_references()) {
4406     enqueue_discovered_references(per_thread_states);
4407   } else {
4408     g1_policy()->phase_times()->record_ref_enq_time(0);
4409   }
4410 
4411   _allocator->release_gc_alloc_regions(evacuation_info);
4412 
4413   merge_per_thread_state_info(per_thread_states);
4414 
4415   // Reset and re-enable the hot card cache.
4416   // Note the counts for the cards in the regions in the
4417   // collection set are reset when the collection set is freed.
4418   _hot_card_cache->reset_hot_cache();
4419   _hot_card_cache->set_use_cache(true);
4420 
4421   purge_code_root_memory();
4422 
4423   redirty_logged_cards();
4424 #if defined(COMPILER2) || INCLUDE_JVMCI
4425   double start = os::elapsedTime();
4426   DerivedPointerTable::update_pointers();
4427   g1_policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
4428 #endif
4429   g1_policy()->print_age_table();
4430 }
4431 
// Publish object-copy (evacuation) memory statistics: credit the bytes
// evacuated into old regions to the policy's since-last-GC tally, and
// report survivor/old evacuation summaries to the STW GC tracer.
4432 void G1CollectedHeap::record_obj_copy_mem_stats() {
     // _old_evac_stats.allocated() is in words; scale to bytes for the policy.
4433   g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4434 
4435   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4436                                                create_g1_evac_summary(&_old_evac_stats));
4437 }
4438 
4439 void G1CollectedHeap::free_region(HeapRegion* hr,
4440                                   FreeRegionList* free_list,
4441                                   bool skip_remset,
4442                                   bool skip_hot_card_cache,
4443                                   bool locked) {
4444   assert(!hr->is_free(), "the region should not be free");




2562   // Fill TLAB's and such
2563   double start = os::elapsedTime();
2564   accumulate_statistics_all_tlabs();
2565   ensure_parsability(true);
2566   g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2567 }
2568 
// Post-GC bookkeeping shared by young and full collections: bump the
// completed old-marking-cycle count (full GCs only), emit the periodic
// remembered-set summary, resize TLABs (timed into the pause's phase
// times), update allocation-context and memory-service statistics, and
// refresh the heap-occupancy info consumed by the soft-reference policy.
// 'full' is true when this epilogue follows a full collection.
2569 void G1CollectedHeap::gc_epilogue(bool full) {
2570   // Update common counters.
2571   if (full) {
2572     // Update the number of full collections that have been completed.
2573     increment_old_marking_cycles_completed(false /* concurrent */);
2574   }
2575 
2576   // We are at the end of the GC. Total collections has already been increased.
2577   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2578 
2579   // FIXME: what is this about?
2580   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2581   // is set.
     // The DerivedPointerTable is only maintained when C2 or JVMCI compiled
     // code can exist; by the end of the GC it must have been drained.
2582 #if COMPILER2_OR_JVMCI
2583   assert(DerivedPointerTable::is_empty(), "derived pointer present");
2584 #endif
2585   // always_do_update_barrier = true;
2586 
     // Time TLAB resizing and record it (in ms) in this pause's phase times.
2587   double start = os::elapsedTime();
2588   resize_all_tlabs();
2589   g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2590 
2591   allocation_context_stats().update(full);
2592 
2593   MemoryService::track_memory_usage();
2594   // We have just completed a GC. Update the soft reference
2595   // policy with the new heap occupancy
2596   Universe::update_heap_info_at_gc();
2597 }
2598 
2599 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2600                                                uint gc_count_before,
2601                                                bool* succeeded,
2602                                                GCCause::Cause gc_cause) {


2975 
2976     // Don't dynamically change the number of GC threads this early.  A value of
2977     // 0 is used to indicate serial work.  When parallel work is done,
2978     // it will be set.
2979 
2980     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2981       IsGCActiveMark x;
2982 
2983       gc_prologue(false);
2984 
2985       if (VerifyRememberedSets) {
2986         log_info(gc, verify)("[Verifying RemSets before GC]");
2987         VerifyRegionRemSetClosure v_cl;
2988         heap_region_iterate(&v_cl);
2989       }
2990 
2991       _verifier->verify_before_gc();
2992 
2993       _verifier->check_bitmaps("GC Start");
2994 
2995 #if COMPILER2_OR_JVMCI
2996       DerivedPointerTable::clear();
2997 #endif
2998 
2999       // Please see comment in g1CollectedHeap.hpp and
3000       // G1CollectedHeap::ref_processing_init() to see how
3001       // reference processing currently works in G1.
3002 
3003       // Enable discovery in the STW reference processor
3004       if (g1_policy()->should_process_references()) {
3005         ref_processor_stw()->enable_discovery();
3006       } else {
3007         ref_processor_stw()->disable_discovery();
3008       }
3009 
3010       {
3011         // We want to temporarily turn off discovery by the
3012         // CM ref processor, if necessary, and turn it back on
3013         // again later if we do. Using a scoped
3014         // NoRefDiscovery object will do this.
3015         NoRefDiscovery no_cm_discovery(ref_processor_cm());


4404   // RSets.
4405   if (g1_policy()->should_process_references()) {
4406     enqueue_discovered_references(per_thread_states);
4407   } else {
4408     g1_policy()->phase_times()->record_ref_enq_time(0);
4409   }
4410 
4411   _allocator->release_gc_alloc_regions(evacuation_info);
4412 
4413   merge_per_thread_state_info(per_thread_states);
4414 
4415   // Reset and re-enable the hot card cache.
4416   // Note the counts for the cards in the regions in the
4417   // collection set are reset when the collection set is freed.
4418   _hot_card_cache->reset_hot_cache();
4419   _hot_card_cache->set_use_cache(true);
4420 
4421   purge_code_root_memory();
4422 
4423   redirty_logged_cards();
4424 #if COMPILER2_OR_JVMCI
4425   double start = os::elapsedTime();
4426   DerivedPointerTable::update_pointers();
4427   g1_policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
4428 #endif
4429   g1_policy()->print_age_table();
4430 }
4431 
// Publish object-copy (evacuation) memory statistics: credit the bytes
// evacuated into old regions to the policy's since-last-GC tally, and
// report survivor/old evacuation summaries to the STW GC tracer.
4432 void G1CollectedHeap::record_obj_copy_mem_stats() {
     // _old_evac_stats.allocated() is in words; scale to bytes for the policy.
4433   g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4434 
4435   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4436                                                create_g1_evac_summary(&_old_evac_stats));
4437 }
4438 
4439 void G1CollectedHeap::free_region(HeapRegion* hr,
4440                                   FreeRegionList* free_list,
4441                                   bool skip_remset,
4442                                   bool skip_hot_card_cache,
4443                                   bool locked) {
4444   assert(!hr->is_free(), "the region should not be free");


< prev index next >